diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -873,14 +873,18 @@
   ManualCodegen = [{
   {
       // builtin: (val0 address, val1 address, ..., ptr, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[NF + 1]->getType()};
-      // intrinsic: (ptr, vl)
-      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1]};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops[NF + 1]->getType()};
+      // intrinsic: (passthru0, passthru1, ..., ptr, vl)
+      SmallVector<llvm::Value *, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(llvm::UndefValue::get(ResultType));
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[NF + 1]);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
-          CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
+          CGM.getNaturalPointeeTypeAlignment(E->getArg(NF)->getType());
       llvm::Value *V;
       for (unsigned I = 0; I < NF; ++I) {
         llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
@@ -944,10 +948,14 @@
   ManualCodegen = [{
   {
       // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[NF + 2]->getType()};
-      // intrinsic: (ptr, vl)
-      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 2]};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops[NF + 2]->getType()};
+      // intrinsic: (passthru0, passthru1, ..., ptr, vl)
+      SmallVector<llvm::Value *, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(llvm::UndefValue::get(ResultType));
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[NF + 2]);
       Value *NewVL = Ops[NF + 1];
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1019,10 +1027,15 @@
   ManualCodegen = [{
   {
       // builtin: (val0 address, val1 address, ..., ptr, stride, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[NF + 2]->getType()};
-      // intrinsic: (ptr, stride, vl)
-      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops[NF + 2]->getType()};
+      // intrinsic: (passthru0, passthru1, ..., ptr, stride, vl)
+      SmallVector<llvm::Value *, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(llvm::UndefValue::get(ResultType));
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[NF + 1]);
+      Operands.push_back(Ops[NF + 2]);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
@@ -1086,10 +1099,15 @@
   ManualCodegen = [{
   {
       // builtin: (val0 address, val1 address, ..., ptr, index, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
-      // intrinsic: (ptr, index, vl)
-      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
+      // intrinsic: (passthru0, passthru1, ..., ptr, index, vl)
+      SmallVector<llvm::Value *, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(llvm::UndefValue::get(ResultType));
+
Operands.push_back(Ops[NF]); + Operands.push_back(Ops[NF + 1]); + Operands.push_back(Ops[NF + 2]); llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); llvm::Value *LoadValue = Builder.CreateCall(F, Operands, ""); clang::CharUnits Align = diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -245,7 +245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -361,7 +361,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -455,7 +455,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -524,7 +524,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -614,7 +614,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -633,7 +633,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -715,7 +715,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, 
i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -921,7 +921,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -957,7 +957,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1001,7 +1001,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1112,7 +1112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1245,7 +1245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1262,7 +1262,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1360,7 +1360,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1436,7 +1436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1453,7 +1453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1481,7 +1481,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1561,7 +1561,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1614,7 +1614,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 @@ -1631,7 +1631,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1657,7 +1657,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1777,7 +1777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1805,7 +1805,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1822,7 +1822,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1848,7 +1848,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1863,7 +1863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64(i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1968,7 +1968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1981,7 +1981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2026,7 +2026,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2039,7 +2039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2071,7 +2071,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2090,7 +2090,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2111,7 +2111,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2134,7 +2134,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2172,7 +2172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2187,7 +2187,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2204,7 +2204,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2217,7 +2217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2230,7 +2230,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2262,7 +2262,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2281,7 +2281,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2302,7 +2302,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2325,7 +2325,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2350,7 +2350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2363,7 +2363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2395,7 +2395,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2408,7 +2408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2421,7 +2421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64(i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2436,7 +2436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2453,7 +2453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2472,7 +2472,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2493,7 +2493,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2516,7 +2516,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2541,7 +2541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2554,7 +2554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2569,7 +2569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2586,7 +2586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2599,7 +2599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2612,7 +2612,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2627,7 +2627,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2663,7 +2663,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2684,7 +2684,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2707,7 +2707,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2732,7 +2732,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2745,7 +2745,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2760,7 +2760,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2790,7 +2790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2803,7 +2803,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2818,7 +2818,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2835,7 +2835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2854,7 +2854,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2875,7 +2875,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2898,7 +2898,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2923,7 +2923,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2936,7 +2936,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2951,7 +2951,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2968,7 +2968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2981,7 +2981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2994,7 +2994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3009,7 +3009,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3026,7 +3026,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg5ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3045,7 +3045,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3066,7 +3066,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3089,7 +3089,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3114,7 +3114,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3127,7 +3127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3142,7 +3142,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3159,7 +3159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3172,7 +3172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3185,7 +3185,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3200,7 +3200,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3217,7 +3217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3236,7 +3236,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3257,7 +3257,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3280,7 +3280,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3305,7 +3305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3318,7 +3318,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3333,7 +3333,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3350,7 +3350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3363,7 +3363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3376,7 +3376,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3391,7 +3391,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3408,7 +3408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3427,7 +3427,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3448,7 +3448,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3471,7 +3471,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3496,7 +3496,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3509,7 +3509,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3524,7 +3524,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3541,7 +3541,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3554,7 +3554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3569,7 +3569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3586,7 +3586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3605,7 +3605,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3626,7 +3626,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3649,7 +3649,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3674,7 +3674,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3687,7 +3687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3702,7 +3702,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3719,7 +3719,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3738,7 +3738,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, 
undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3759,7 +3759,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3782,7 +3782,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3807,7 +3807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3820,7 +3820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3835,7 +3835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3852,7 +3852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64(i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3865,7 +3865,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3878,7 +3878,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3893,7 +3893,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3910,7 +3910,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3950,7 +3950,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3973,7 +3973,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3998,7 +3998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4011,7 +4011,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4026,7 +4026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4043,7 +4043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4056,7 +4056,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4069,7 +4069,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4101,7 +4101,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4120,7 +4120,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4141,7 +4141,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4164,7 +4164,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4189,7 +4189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4202,7 +4202,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4234,7 +4234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4247,7 +4247,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4260,7 +4260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4275,7 +4275,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4292,7 +4292,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4311,7 +4311,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4332,7 +4332,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4355,7 +4355,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4380,7 +4380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4393,7 +4393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4408,7 +4408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4425,7 +4425,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4438,7 +4438,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4453,7 +4453,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4470,7 +4470,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4489,7 +4489,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4510,7 +4510,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4533,7 +4533,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4558,7 +4558,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4571,7 +4571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4586,7 +4586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4603,7 +4603,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4629,7 +4629,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4644,7 +4644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4661,7 +4661,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4680,7 +4680,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4701,7 +4701,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4724,7 +4724,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4762,7 +4762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4777,7 +4777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4794,7 +4794,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4807,7 +4807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4820,7 +4820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4835,7 +4835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4852,7 +4852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4871,7 +4871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4892,7 +4892,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4915,7 +4915,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4940,7 +4940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4953,7 +4953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4968,7 +4968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4985,7 +4985,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4998,7 +4998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5011,7 +5011,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5026,7 +5026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5043,7 +5043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5062,7 +5062,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5083,7 +5083,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5106,7 +5106,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,7 +5131,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5144,7 +5144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5159,7 +5159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5176,7 +5176,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5189,7 +5189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5202,7 +5202,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5217,7 +5217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5234,7 +5234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5253,7 +5253,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64(i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5274,7 +5274,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5297,7 +5297,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5322,7 +5322,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5335,7 +5335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5350,7 +5350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5367,7 
+5367,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5380,7 +5380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5408,7 +5408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5425,7 +5425,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5444,7 +5444,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5465,7 +5465,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5488,7 +5488,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5513,7 +5513,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5541,7 +5541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5558,7 +5558,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5571,7 +5571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5584,7 +5584,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5599,7 +5599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5616,7 +5616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5635,7 +5635,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5656,7 +5656,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5679,7 +5679,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5704,7 +5704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5717,7 +5717,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5732,7 +5732,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5749,7 +5749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5762,7 +5762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5775,7 +5775,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5790,7 +5790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5807,7 +5807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5826,7 +5826,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5847,7 +5847,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 @@ -5870,7 +5870,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5895,7 +5895,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5908,7 +5908,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5923,7 +5923,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5940,7 +5940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5953,7 +5953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5966,7 +5966,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5981,7 +5981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5998,7 +5998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6017,7 +6017,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6038,7 +6038,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6061,7 +6061,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6086,7 +6086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6099,7 +6099,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6114,7 +6114,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6131,7 +6131,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6144,7 +6144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6157,7 +6157,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6189,7 +6189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6208,7 +6208,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6229,7 +6229,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6252,7 +6252,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6277,7 +6277,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6290,7 +6290,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6322,7 +6322,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6335,7 +6335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6348,7 +6348,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6363,7 +6363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6380,7 +6380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6399,7 +6399,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6420,7 +6420,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6443,7 +6443,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6468,7 +6468,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6481,7 +6481,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6496,7 +6496,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6513,7 +6513,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6717,7 +6717,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6730,7 +6730,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6745,7 +6745,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6762,7 +6762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6781,7 +6781,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6802,7 +6802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6825,7 +6825,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6850,7 +6850,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6863,7 +6863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6878,7 +6878,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6895,7 +6895,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6908,7 +6908,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6921,7 +6921,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6936,7 +6936,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6953,7 +6953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6972,7 +6972,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6993,7 +6993,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7016,7 +7016,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7041,7 +7041,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7054,7 +7054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7069,7 +7069,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7086,7 +7086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7099,7 +7099,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7112,7 +7112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7127,7 +7127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7144,7 +7144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7163,7 +7163,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7184,7 +7184,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7207,7 +7207,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7232,7 +7232,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7245,7 +7245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7260,7 +7260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7277,7 +7277,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7290,7 +7290,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7303,7 +7303,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7318,7 +7318,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7335,7 +7335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7354,7 +7354,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7375,7 +7375,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7398,7 +7398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7423,7 +7423,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7436,7 +7436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7451,7 +7451,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7468,7 +7468,7 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
@@ -22,7 +22,7 @@
// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -37,7 +37,7 @@
// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -54,7 +54,7 @@
// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -73,7 +73,7 @@
// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -275,7 +275,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -541,7 +541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -554,7 +554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -605,7 +605,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -626,7 +626,7 @@ 
// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -674,7 +674,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -892,7 +892,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -985,7 +985,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1073,7 +1073,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1137,7 +1137,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, 
undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1181,7 +1181,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1291,7 +1291,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1352,7 +1352,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1384,7 +1384,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1424,7 +1424,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1472,7 +1472,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1580,7 +1580,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1605,7 +1605,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1633,7 +1633,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1650,7 +1650,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1713,7 +1713,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1738,7 +1738,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1751,7 +1751,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1766,7 +1766,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1802,7 +1802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1823,7 +1823,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1846,7 +1846,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1884,7 +1884,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1935,7 +1935,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1956,7 +1956,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2004,7 +2004,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2017,7 +2017,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2049,7 +2049,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2089,7 +2089,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2112,7 +2112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2137,7 +2137,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2165,7 +2165,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2182,7 +2182,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2201,7 +2201,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2222,7 +2222,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2270,7 +2270,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2283,7 +2283,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2298,7 +2298,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2315,7 +2315,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2334,7 +2334,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2355,7 +2355,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2403,7 +2403,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2416,7 +2416,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2431,7 +2431,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2448,7 +2448,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2467,7 +2467,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2488,7 +2488,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2511,7 +2511,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2536,7 +2536,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2549,7 +2549,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2564,7 +2564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2581,7 +2581,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2600,7 +2600,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2621,7 +2621,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2669,7 +2669,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2682,7 +2682,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2697,7 +2697,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2714,7 +2714,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2733,7 +2733,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2754,7 +2754,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2802,7 +2802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2815,7 +2815,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2830,7 +2830,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2847,7 +2847,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2866,7 +2866,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2887,7 +2887,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , 
, } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2910,7 +2910,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2935,7 +2935,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2948,7 +2948,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2963,7 +2963,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2980,7 +2980,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2999,7 +2999,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3020,7 +3020,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3043,7 +3043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3068,7 +3068,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3081,7 +3081,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3096,7 +3096,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3113,7 +3113,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3132,7 +3132,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3153,7 +3153,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3176,7 +3176,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3201,7 +3201,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3214,7 +3214,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3229,7 +3229,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3265,7 +3265,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3286,7 +3286,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3309,7 +3309,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3334,7 +3334,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3347,7 +3347,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3362,7 +3362,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3379,7 +3379,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3398,7 +3398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3419,7 +3419,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3442,7 +3442,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3467,7 +3467,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3480,7 +3480,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3495,7 +3495,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3512,7 +3512,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3531,7 +3531,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3552,7 +3552,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3575,7 +3575,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3600,7 +3600,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3613,7 +3613,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3628,7 +3628,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3645,7 +3645,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3664,7 +3664,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3685,7 +3685,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3708,7 +3708,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3733,7 +3733,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3746,7 +3746,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3761,7 +3761,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3778,7 +3778,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3797,7 +3797,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3818,7 +3818,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3841,7 +3841,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3866,7 +3866,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3894,7 +3894,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3911,7 +3911,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3930,7 +3930,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3951,7 +3951,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3974,7 +3974,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3999,7 +3999,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4012,7 +4012,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4027,7 +4027,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4044,7 +4044,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4063,7 +4063,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4132,7 +4132,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4145,7 +4145,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4160,7 +4160,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4177,7 +4177,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4196,7 +4196,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4240,7 +4240,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4265,7 +4265,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4278,7 +4278,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4293,7 +4293,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4310,7 +4310,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4329,7 +4329,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4350,7 +4350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4373,7 +4373,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4398,7 +4398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4411,7 +4411,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4426,7 +4426,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4443,7 +4443,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4462,7 +4462,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4483,7 +4483,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4506,7 +4506,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4531,7 +4531,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4544,7 +4544,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4559,7 +4559,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4576,7 +4576,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4595,7 +4595,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4639,7 +4639,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4664,7 +4664,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4677,7 +4677,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4692,7 +4692,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4709,7 +4709,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4728,7 +4728,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4772,7 +4772,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4797,7 +4797,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4810,7 +4810,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4825,7 +4825,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4842,7 +4842,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4861,7 +4861,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4882,7 +4882,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4905,7 +4905,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4930,7 +4930,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4943,7 +4943,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4958,7 +4958,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4975,7 +4975,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4994,7 +4994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5015,7 +5015,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5038,7 +5038,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5063,7 +5063,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5076,7 +5076,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5091,7 +5091,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5108,7 +5108,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5127,7 +5127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5148,7 +5148,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5171,7 +5171,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg8ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5196,7 +5196,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5209,7 +5209,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5224,7 +5224,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5241,7 +5241,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5260,7 +5260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 
0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5281,7 +5281,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5304,7 +5304,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5329,7 +5329,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5342,7 +5342,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5357,7 +5357,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5374,7 +5374,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5414,7 +5414,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5437,7 +5437,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5462,7 +5462,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5475,7 +5475,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5490,7 +5490,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5507,7 +5507,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5547,7 +5547,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5570,7 +5570,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5595,7 +5595,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5608,7 +5608,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5623,7 +5623,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5640,7 +5640,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5659,7 +5659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5680,7 +5680,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5703,7 +5703,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5728,7 +5728,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5741,7 +5741,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5756,7 +5756,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5773,7 +5773,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5792,7 +5792,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5813,7 +5813,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5836,7 +5836,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5861,7 +5861,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5874,7 +5874,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5889,7 +5889,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5906,7 +5906,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5925,7 +5925,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5946,7 +5946,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5969,7 +5969,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5994,7 +5994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6007,7 +6007,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6022,7 +6022,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6039,7 +6039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6058,7 +6058,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6079,7 +6079,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6102,7 +6102,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6127,7 +6127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6140,7 +6140,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6155,7 +6155,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6191,7 +6191,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6212,7 +6212,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6235,7 +6235,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6260,7 +6260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6273,7 +6273,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6288,7 +6288,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6324,7 +6324,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6345,7 +6345,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6368,7 +6368,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6393,7 +6393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6406,7 +6406,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6421,7 +6421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6438,7 +6438,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6457,7 +6457,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6478,7 +6478,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6501,7 +6501,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6723,7 +6723,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6744,7 +6744,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6767,7 +6767,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6792,7 +6792,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6805,7 +6805,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6820,7 +6820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6837,7 +6837,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6856,7 +6856,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6877,7 +6877,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, 
undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6900,7 +6900,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -245,7 +245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -361,7 +361,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -455,7 +455,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -524,7 +524,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg3ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -614,7 +614,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -633,7 +633,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -715,7 +715,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64( undef, undef, 
undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -921,7 +921,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -957,7 +957,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1001,7 +1001,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1097,7 +1097,7 
@@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1112,7 +1112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1245,7 +1245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1262,7 +1262,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1360,7 +1360,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1436,7 +1436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1453,7 +1453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1481,7 +1481,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1561,7 +1561,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1614,7 +1614,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1631,7 +1631,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1657,7 +1657,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1777,7 +1777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1805,7 +1805,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1822,7 +1822,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1848,7 +1848,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1863,7 +1863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64(i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1968,7 +1968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1981,7 +1981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 
1 @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2026,7 +2026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2039,7 +2039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2071,7 +2071,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2090,7 +2090,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2111,7 +2111,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2134,7 +2134,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2172,7 +2172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2187,7 +2187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2204,7 +2204,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2217,7 +2217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2230,7 +2230,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2262,7 +2262,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2281,7 +2281,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2302,7 +2302,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2325,7 +2325,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2350,7 +2350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2363,7 +2363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2395,7 +2395,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2408,7 +2408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2421,7 +2421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2436,7 +2436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2453,7 +2453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2472,7 +2472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2493,7 +2493,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -2516,7 +2516,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2541,7 +2541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2554,7 +2554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2569,7 +2569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2586,7 +2586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2599,7 +2599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2612,7 +2612,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2627,7 +2627,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2663,7 +2663,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2684,7 +2684,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2707,7 +2707,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2732,7 +2732,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2745,7 +2745,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2760,7 +2760,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2790,7 +2790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2803,7 +2803,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2818,7 +2818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2835,7 +2835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2854,7 +2854,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2875,7 +2875,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2898,7 +2898,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 @@ -2923,7 +2923,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2936,7 +2936,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2951,7 +2951,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2968,7 +2968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2981,7 +2981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2994,7 +2994,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3009,7 
+3009,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3026,7 +3026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3045,7 +3045,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3066,7 +3066,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3089,7 +3089,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3114,7 +3114,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3127,7 +3127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3142,7 +3142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3159,7 +3159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3172,7 +3172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3185,7 +3185,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3200,7 +3200,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3217,7 +3217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3236,7 +3236,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3257,7 +3257,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3280,7 +3280,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3305,7 +3305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3318,7 +3318,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3333,7 +3333,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3350,7 +3350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3363,7 +3363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3376,7 +3376,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3391,7 +3391,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3408,7 +3408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3427,7 +3427,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3448,7 +3448,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3471,7 +3471,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3496,7 +3496,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3509,7 +3509,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
@@ -3524,7 +3524,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3541,7 +3541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3554,7 +3554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3569,7 +3569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3586,7 +3586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3605,7 +3605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3626,7 +3626,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3649,7 +3649,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3674,7 +3674,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3687,7 +3687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3702,7 +3702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3719,7 +3719,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, 
undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3738,7 +3738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3759,7 +3759,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3782,7 +3782,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3807,7 +3807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3820,7 +3820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3835,7 +3835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3852,7 +3852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3865,7 +3865,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3878,7 +3878,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3893,7 +3893,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3910,7 +3910,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3950,7 +3950,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3973,7 +3973,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3998,7 +3998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4011,7 +4011,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4026,7 +4026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4043,7 +4043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4056,7 +4056,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4069,7 +4069,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4101,7 +4101,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4120,7 +4120,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4141,7 +4141,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4164,7 +4164,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4189,7 +4189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4202,7 +4202,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4234,7 +4234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4247,7 +4247,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4260,7 +4260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4275,7 +4275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4292,7 +4292,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4311,7 +4311,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4332,7 +4332,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4355,7 +4355,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4380,7 +4380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4393,7 +4393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4408,7 +4408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4425,7 +4425,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ 
-4438,7 +4438,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4453,7 +4453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4470,7 +4470,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4489,7 +4489,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4510,7 +4510,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4533,7 +4533,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4558,7 +4558,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4571,7 +4571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4586,7 +4586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4603,7 +4603,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4629,7 +4629,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4644,7 +4644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4661,7 +4661,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4680,7 +4680,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4701,7 +4701,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4724,7 +4724,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4762,7 +4762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4777,7 +4777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4794,7 +4794,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4807,7 +4807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4820,7 +4820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4835,7 +4835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4852,7 +4852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4871,7 +4871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4892,7 +4892,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4915,7 +4915,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4940,7 +4940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4953,7 +4953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4968,7 +4968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4985,7 +4985,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4998,7 +4998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5011,7 +5011,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5026,7 +5026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5043,7 +5043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5062,7 +5062,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5083,7 +5083,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5106,7 +5106,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,7 +5131,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5144,7 +5144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5159,7 +5159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5176,7 +5176,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5189,7 +5189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5202,7 +5202,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5217,7 +5217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5234,7 +5234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5253,7 +5253,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5274,7 +5274,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5297,7 +5297,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5322,7 +5322,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5335,7 +5335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5350,7 +5350,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg4ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5367,7 +5367,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5380,7 +5380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5408,7 +5408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5425,7 +5425,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 
1 @@ -5444,7 +5444,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5465,7 +5465,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5488,7 +5488,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5513,7 +5513,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5541,7 +5541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5558,7 +5558,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5571,7 +5571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5584,7 +5584,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5599,7 +5599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5616,7 +5616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5635,7 +5635,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64( undef, 
undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5656,7 +5656,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5679,7 +5679,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5704,7 +5704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5717,7 +5717,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5732,7 +5732,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5749,7 +5749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5762,7 +5762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5775,7 +5775,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5790,7 +5790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5807,7 +5807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5826,7 +5826,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5847,7 +5847,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5870,7 +5870,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5895,7 +5895,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5908,7 +5908,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5923,7 +5923,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5940,7 +5940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5953,7 +5953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5966,7 +5966,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5981,7 +5981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5998,7 +5998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6017,7 +6017,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6038,7 +6038,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, 
float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6061,7 +6061,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6086,7 +6086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6099,7 +6099,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6114,7 +6114,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6131,7 +6131,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6144,7 +6144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6157,7 +6157,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6189,7 +6189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6208,7 +6208,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6229,7 +6229,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6252,7 +6252,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6277,7 +6277,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6290,7 +6290,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6322,7 +6322,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6335,7 +6335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6348,7 +6348,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6363,7 +6363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6380,7 +6380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6399,7 +6399,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6420,7 +6420,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6443,7 +6443,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6468,7 +6468,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6481,7 +6481,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6496,7 +6496,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6513,7 +6513,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6717,7 +6717,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6730,7 +6730,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6745,7 +6745,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6762,7 +6762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6781,7 +6781,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6802,7 +6802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6825,7 +6825,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6850,7 +6850,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6863,7 +6863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6878,7 +6878,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6895,7 +6895,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6908,7 +6908,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6921,7 +6921,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6936,7 +6936,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6953,7 +6953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6972,7 +6972,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6993,7 +6993,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7016,7 +7016,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7041,7 +7041,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7054,7 +7054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7069,7 +7069,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7086,7 +7086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7099,7 +7099,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7112,7 +7112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7127,7 +7127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7144,7 +7144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7163,7 +7163,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7184,7 +7184,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7207,7 +7207,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7232,7 +7232,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7245,7 +7245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7260,7 +7260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7277,7 +7277,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7290,7 +7290,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7303,7 +7303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7318,7 +7318,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7335,7 +7335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7354,7 +7354,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7375,7 +7375,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7398,7 +7398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7423,7 +7423,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7436,7 +7436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7451,7 +7451,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7468,7 +7468,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg4ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -275,7 +275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -541,7 +541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -554,7 +554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ 
-605,7 +605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -626,7 +626,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -674,7 +674,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -892,7 +892,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -940,7 +940,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -985,7 +985,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1073,7 +1073,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1137,7 +1137,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1181,7 +1181,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1291,7 +1291,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1352,7 +1352,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ 
-1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1384,7 +1384,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1424,7 +1424,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1472,7 +1472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1580,7 +1580,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1605,7 +1605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1633,7 +1633,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1650,7 +1650,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1713,7 +1713,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1738,7 +1738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1751,7 +1751,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1766,7 +1766,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1802,7 +1802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1823,7 +1823,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1846,7 +1846,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1884,7 +1884,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1935,7 +1935,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1956,7 +1956,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2004,7 +2004,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2017,7 +2017,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2049,7 +2049,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2089,7 +2089,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2112,7 +2112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2137,7 +2137,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2165,7 +2165,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2182,7 +2182,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2201,7 +2201,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2222,7 +2222,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2270,7 +2270,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2283,7 +2283,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2298,7 +2298,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2315,7 +2315,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2334,7 +2334,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2355,7 +2355,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2403,7 +2403,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2416,7 +2416,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2431,7 +2431,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2448,7 +2448,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2467,7 +2467,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2488,7 +2488,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2511,7 +2511,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2536,7 +2536,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2549,7 +2549,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2564,7 +2564,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2581,7 +2581,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2600,7 +2600,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2621,7 +2621,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 @@ -2669,7 +2669,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2682,7 +2682,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2697,7 +2697,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2714,7 +2714,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2733,7 +2733,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2754,7 +2754,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2802,7 +2802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2815,7 +2815,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2830,7 +2830,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2847,7 +2847,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2866,7 +2866,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2887,7 +2887,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2910,7 +2910,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2935,7 +2935,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2948,7 +2948,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2963,7 +2963,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2980,7 +2980,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2999,7 +2999,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3020,7 +3020,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3043,7 +3043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3068,7 +3068,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3081,7 +3081,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3096,7 +3096,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3113,7 +3113,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3132,7 +3132,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3153,7 +3153,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3176,7 +3176,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3201,7 +3201,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3214,7 +3214,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3229,7 +3229,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3265,7 +3265,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3286,7 +3286,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3309,7 +3309,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3334,7 +3334,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3347,7 +3347,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3362,7 +3362,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3379,7 +3379,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3398,7 +3398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3419,7 +3419,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3442,7 +3442,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3467,7 +3467,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3480,7 +3480,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3495,7 +3495,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3512,7 +3512,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3531,7 +3531,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3552,7 +3552,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3575,7 +3575,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3600,7 +3600,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3613,7 +3613,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3628,7 +3628,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3645,7 +3645,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3664,7 +3664,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3685,7 +3685,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3708,7 +3708,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3733,7 +3733,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
@@ -3746,7 +3746,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3761,7 +3761,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3778,7 +3778,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3797,7 +3797,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3818,7 +3818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3841,7 +3841,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3866,7 +3866,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3894,7 +3894,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3911,7 +3911,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3930,7 +3930,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3951,7 +3951,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3974,7 +3974,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3999,7 +3999,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4012,7 +4012,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4027,7 +4027,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4044,7 +4044,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4063,7 +4063,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4132,7 +4132,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4145,7 +4145,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4160,7 +4160,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4177,7 +4177,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4196,7 +4196,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4240,7 +4240,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4265,7 +4265,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4278,7 +4278,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4293,7 +4293,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4310,7 +4310,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4329,7 +4329,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4350,7 +4350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4373,7 +4373,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4398,7 +4398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4411,7 +4411,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4426,7 +4426,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4443,7 +4443,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4462,7 +4462,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4483,7 +4483,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 1 @@ -4506,7 +4506,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4531,7 +4531,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4544,7 +4544,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4559,7 +4559,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4576,7 +4576,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4595,7 +4595,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4639,7 +4639,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4664,7 +4664,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4677,7 +4677,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4692,7 +4692,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4709,7 +4709,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4728,7 +4728,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4772,7 +4772,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4797,7 +4797,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4810,7 +4810,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4825,7 +4825,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4842,7 +4842,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4861,7 +4861,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4882,7 +4882,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4905,7 +4905,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4930,7 +4930,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4943,7 +4943,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4958,7 +4958,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4975,7 +4975,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4994,7 +4994,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5015,7 +5015,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5038,7 +5038,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5063,7 +5063,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5076,7 +5076,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5091,7 +5091,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5108,7 +5108,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5127,7 +5127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5148,7 +5148,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5171,7 +5171,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5196,7 +5196,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5209,7 +5209,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5224,7 +5224,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5241,7 +5241,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5260,7 +5260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5281,7 +5281,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5304,7 +5304,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5329,7 +5329,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5342,7 +5342,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5357,7 +5357,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5374,7 +5374,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5414,7 +5414,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5437,7 +5437,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5462,7 +5462,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5475,7 +5475,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5490,7 +5490,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5507,7 +5507,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5547,7 +5547,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5570,7 +5570,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5595,7 +5595,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5608,7 +5608,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5623,7 +5623,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5640,7 +5640,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5659,7 +5659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5680,7 +5680,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5703,7 +5703,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5728,7 +5728,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5741,7 +5741,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5756,7 +5756,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5773,7 +5773,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5792,7 +5792,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5813,7 +5813,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5836,7 +5836,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5861,7 +5861,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5874,7 +5874,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5889,7 +5889,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5906,7 +5906,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5925,7 +5925,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5946,7 +5946,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5969,7 +5969,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5994,7 +5994,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6007,7 +6007,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6022,7 +6022,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6039,7 +6039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6058,7 +6058,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6079,7 +6079,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6102,7 +6102,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6127,7 +6127,7 @@ 
// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6140,7 +6140,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6155,7 +6155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6191,7 +6191,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6212,7 +6212,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6235,7 +6235,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6260,7 +6260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6273,7 +6273,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6288,7 +6288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6324,7 +6324,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6345,7 +6345,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6368,7 +6368,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6393,7 +6393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6406,7 +6406,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6421,7 +6421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6438,7 +6438,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6457,7 +6457,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6478,7 +6478,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6501,7 +6501,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6723,7 +6723,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6744,7 +6744,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 @@ -6767,7 +6767,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6792,7 +6792,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6805,7 +6805,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6820,7 +6820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6837,7 +6837,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6856,7 +6856,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, float* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6877,7 +6877,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6900,7 +6900,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -245,7 +245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -361,7 +361,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -455,7 +455,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -524,7 +524,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -614,7 +614,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -633,7 +633,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -715,7 +715,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -921,7 +921,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -957,7 +957,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1001,7 +1001,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ 
-1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1112,7 +1112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1245,7 +1245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1262,7 +1262,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1360,7 +1360,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1436,7 +1436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1453,7 +1453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1481,7 +1481,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1561,7 +1561,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1614,7 +1614,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1631,7 +1631,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1657,7 +1657,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1777,7 +1777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1805,7 +1805,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1822,7 +1822,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1848,7 +1848,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1863,7 +1863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1968,7 +1968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1981,7 +1981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2026,7 +2026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2039,7 +2039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2071,7 +2071,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2090,7 +2090,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2111,7 +2111,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2134,7 +2134,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2172,7 +2172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2187,7 +2187,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2204,7 +2204,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2217,7 +2217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2230,7 +2230,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2262,7 +2262,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2281,7 +2281,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2302,7 +2302,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2325,7 +2325,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2350,7 +2350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2363,7 +2363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2395,7 +2395,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2408,7 +2408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2421,7 +2421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2436,7 +2436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2453,7 +2453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2472,7 +2472,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 @@ -2493,7 +2493,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2516,7 +2516,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2541,7 +2541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2554,7 +2554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2569,7 +2569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2586,7 +2586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2599,7 +2599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2612,7 +2612,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2627,7 +2627,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2663,7 +2663,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2684,7 +2684,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2707,7 +2707,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2732,7 +2732,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2745,7 +2745,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2760,7 +2760,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2790,7 +2790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2803,7 +2803,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2818,7 +2818,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2835,7 +2835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2854,7 +2854,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2875,7 +2875,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -2898,7 +2898,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2923,7 +2923,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2936,7 +2936,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2951,7 +2951,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2968,7 +2968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2981,7 +2981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2994,7 +2994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3009,7 +3009,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3026,7 +3026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3045,7 +3045,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3066,7 +3066,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3089,7 +3089,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, 
undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3114,7 +3114,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3127,7 +3127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3142,7 +3142,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3159,7 +3159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3172,7 +3172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3185,7 +3185,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3200,7 +3200,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3217,7 +3217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3236,7 +3236,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3257,7 +3257,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3280,7 +3280,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3305,7 +3305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3318,7 +3318,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3333,7 +3333,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3350,7 +3350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3363,7 +3363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3376,7 +3376,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3391,7 +3391,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3408,7 +3408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3427,7 +3427,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3448,7 +3448,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3471,7 +3471,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3496,7 +3496,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3509,7 +3509,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3524,7 +3524,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3541,7 +3541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3554,7 +3554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3569,7 +3569,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3586,7 +3586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3605,7 +3605,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3626,7 +3626,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3649,7 +3649,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3674,7 +3674,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3687,7 +3687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3702,7 +3702,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3719,7 +3719,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3738,7 +3738,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3759,7 +3759,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3782,7 +3782,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3807,7 +3807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3820,7 +3820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3835,7 +3835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3852,7 +3852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3865,7 +3865,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3878,7 +3878,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3893,7 +3893,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3910,7 +3910,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg5ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3950,7 +3950,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3973,7 +3973,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3998,7 +3998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4011,7 +4011,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4026,7 +4026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4043,7 +4043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4056,7 +4056,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4069,7 +4069,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4101,7 +4101,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4120,7 +4120,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4141,7 +4141,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4164,7 +4164,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4189,7 +4189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4202,7 +4202,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4234,7 +4234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4247,7 +4247,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4260,7 +4260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4275,7 +4275,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4292,7 +4292,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4311,7 +4311,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4332,7 +4332,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4355,7 +4355,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4380,7 +4380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4393,7 +4393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4408,7 +4408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4425,7 +4425,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4438,7 +4438,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4453,7 +4453,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4470,7 +4470,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4489,7 +4489,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4510,7 +4510,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, 
undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4533,7 +4533,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4558,7 +4558,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4571,7 +4571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4586,7 +4586,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4603,7 +4603,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4629,7 +4629,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4644,7 +4644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4661,7 +4661,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4680,7 +4680,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4701,7 +4701,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4724,7 +4724,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4762,7 +4762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4777,7 +4777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4794,7 +4794,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4807,7 +4807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4820,7 +4820,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4835,7 +4835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4852,7 +4852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4871,7 +4871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4892,7 +4892,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4915,7 +4915,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4940,7 +4940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4953,7 +4953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4968,7 +4968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4985,7 +4985,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4998,7 +4998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5011,7 +5011,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5026,7 +5026,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5043,7 +5043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5062,7 +5062,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5083,7 +5083,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5106,7 +5106,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,7 +5131,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5144,7 +5144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5159,7 +5159,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5176,7 +5176,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5189,7 +5189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5202,7 +5202,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5217,7 +5217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5234,7 +5234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5253,7 +5253,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5274,7 +5274,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5297,7 +5297,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5322,7 +5322,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5335,7 +5335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5350,7 +5350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5367,7 +5367,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5380,7 +5380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5408,7 +5408,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5425,7 +5425,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5444,7 +5444,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5465,7 +5465,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5488,7 +5488,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5513,7 +5513,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5541,7 +5541,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5558,7 +5558,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5571,7 +5571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5584,7 +5584,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5599,7 +5599,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5616,7 +5616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5635,7 +5635,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5656,7 +5656,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5679,7 +5679,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5704,7 +5704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5717,7 +5717,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5732,7 +5732,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5749,7 +5749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5762,7 +5762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5775,7 +5775,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5790,7 +5790,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5807,7 +5807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 @@ -5826,7 +5826,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5847,7 +5847,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5870,7 +5870,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5895,7 +5895,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5908,7 +5908,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5923,7 +5923,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5940,7 +5940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5953,7 +5953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5966,7 +5966,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5981,7 +5981,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5998,7 +5998,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6017,7 +6017,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6038,7 +6038,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6061,7 +6061,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6086,7 +6086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6099,7 +6099,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6114,7 +6114,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6131,7 +6131,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6144,7 +6144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6157,7 +6157,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6189,7 +6189,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6208,7 +6208,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 @@ -6229,7 +6229,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6252,7 +6252,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6277,7 +6277,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6290,7 +6290,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6322,7 +6322,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6335,7 +6335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6348,7 +6348,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6363,7 +6363,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6380,7 +6380,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6399,7 +6399,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6420,7 +6420,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6443,7 +6443,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6468,7 +6468,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6481,7 +6481,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6496,7 +6496,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6513,7 +6513,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6717,7 +6717,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6730,7 +6730,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6745,7 +6745,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6762,7 +6762,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6781,7 +6781,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6802,7 +6802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6825,7 +6825,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6850,7 +6850,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6863,7 +6863,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6878,7 +6878,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6895,7 +6895,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6908,7 +6908,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6921,7 +6921,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6936,7 +6936,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6953,7 +6953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6972,7 +6972,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6993,7 +6993,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7016,7 +7016,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7041,7 +7041,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7054,7 +7054,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7069,7 +7069,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7086,7 +7086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7099,7 +7099,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7112,7 +7112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7127,7 +7127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7144,7 +7144,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7163,7 +7163,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7184,7 +7184,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7207,7 +7207,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7232,7 +7232,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7245,7 +7245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7260,7 +7260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7277,7 +7277,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7290,7 +7290,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7303,7 +7303,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7318,7 +7318,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7335,7 +7335,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7354,7 +7354,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7375,7 +7375,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7398,7 +7398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7423,7 +7423,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7436,7 +7436,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7451,7 +7451,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7468,7 +7468,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask.c @@ -7481,7 +7481,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7494,7 +7494,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7509,7 +7509,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7526,7 +7526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7545,7 +7545,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7566,7 +7566,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7589,7 +7589,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7614,7 +7614,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7627,7 +7627,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7642,7 +7642,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7659,7 +7659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7672,7 +7672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7685,7 +7685,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7700,7 +7700,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7717,7 +7717,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7736,7 +7736,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7757,7 +7757,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7780,7 +7780,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7805,7 +7805,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7818,7 +7818,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7833,7 +7833,7 @@
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -7850,7 +7850,7 @@
[...analogous hunks follow for @test_vloxseg2ei16_v_f16m4, @test_vloxseg{2-8}ei32_v_f16m1, @test_vloxseg{2-4}ei32_v_f16m2, @test_vloxseg2ei32_v_f16m4, @test_vloxseg{2-8}ei64_v_f16m1, and @test_vloxseg{2-4}ei64_v_f16m2; in each, the @llvm.riscv.vloxsegN call gains N leading "<vscale x ... x half> undef" passthru operands ahead of the half* base, index, and i64 vl operands...]
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
@@ -6925,7 +6925,7 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -6938,7 +6938,7 @@
[...analogous hunks follow for the remaining @test_vloxseg{2-8}ei{8,16,32,64}_v_f16mf4 and @test_vloxseg{2-8}ei{8,16,32,64}_v_f16mf2 tests; each @llvm.riscv.vloxsegN call gains N leading "<vscale x ... x half> undef" passthru operands ahead of the half* base, index, and i64 vl operands...]
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
@@ -9,7 +9,7 @@
// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -22,7 +22,7 @@
[...analogous hunks follow for the remaining @test_vloxseg{2-8}ei8_v_i8{mf8,mf4,mf2}, @test_vloxseg{2-8}ei16_v_i8{mf8,mf4}, and @test_vloxseg{2-5}ei16_v_i8mf2 tests; each @llvm.riscv.vloxsegN call gains N leading "<vscale x ... x i8> undef" passthru operands ahead of the i8* base, index, and i64 vl operands...]
@@ -738,7 +738,7 @@
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , 
, } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -892,7 +892,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -985,7 +985,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1073,7 +1073,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1137,7 +1137,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1181,7 +1181,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 
@@ -1291,7 +1291,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1352,7 +1352,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1384,7 +1384,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1424,7 +1424,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1472,7 +1472,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1580,7 +1580,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1605,7 +1605,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1633,7 +1633,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1650,7 +1650,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1713,7 +1713,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1738,7 +1738,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1751,7 +1751,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1766,7 +1766,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1802,7 +1802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1823,7 +1823,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1846,7 +1846,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1884,7 +1884,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1935,7 +1935,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1956,7 +1956,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2004,7 +2004,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2017,7 +2017,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2049,7 +2049,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2089,7 +2089,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2112,7 +2112,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2137,7 +2137,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2165,7 +2165,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2182,7 +2182,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2201,7 +2201,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2222,7 +2222,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ 
-2270,7 +2270,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2283,7 +2283,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2298,7 +2298,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2315,7 +2315,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2334,7 +2334,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2355,7 +2355,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2403,7 +2403,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2416,7 +2416,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2431,7 +2431,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2448,7 +2448,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2467,7 +2467,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2488,7 +2488,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2511,7 +2511,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2536,7 +2536,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2549,7 +2549,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2564,7 +2564,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2581,7 +2581,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2600,7 +2600,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2621,7 +2621,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2669,7 +2669,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2682,7 +2682,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2697,7 +2697,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2714,7 +2714,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2733,7 +2733,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2754,7 +2754,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2802,7 +2802,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2815,7 +2815,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2830,7 +2830,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2847,7 +2847,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2866,7 +2866,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2887,7 +2887,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2910,7 +2910,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg8ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2935,7 +2935,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2948,7 +2948,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2963,7 +2963,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2980,7 +2980,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2999,7 +2999,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 
0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3020,7 +3020,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3043,7 +3043,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3068,7 +3068,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3081,7 +3081,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3096,7 +3096,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3113,7 +3113,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3132,7 +3132,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3153,7 +3153,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3176,7 +3176,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3201,7 +3201,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3214,7 +3214,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3229,7 +3229,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3265,7 +3265,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3286,7 +3286,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3309,7 +3309,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3334,7 +3334,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3347,7 +3347,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3362,7 +3362,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3379,7 +3379,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3398,7 +3398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3419,7 +3419,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3442,7 +3442,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i8.i64( undef, 
undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3467,7 +3467,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3480,7 +3480,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3495,7 +3495,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3512,7 +3512,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3531,7 +3531,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3552,7 +3552,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3575,7 +3575,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3600,7 +3600,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3613,7 +3613,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3628,7 +3628,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3645,7 +3645,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3664,7 +3664,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg6ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3685,7 +3685,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3708,7 +3708,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3733,7 +3733,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3746,7 +3746,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3761,7 +3761,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3778,7 +3778,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3797,7 +3797,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3818,7 +3818,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3841,7 +3841,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3866,7 +3866,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3894,7 +3894,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3911,7 +3911,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3930,7 +3930,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3951,7 +3951,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3974,7 +3974,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3999,7 +3999,7 @@ // 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4012,7 +4012,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4027,7 +4027,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4044,7 +4044,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4063,7 +4063,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4132,7 +4132,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4145,7 +4145,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4160,7 +4160,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4177,7 +4177,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4196,7 +4196,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4240,7 +4240,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4265,7 +4265,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4278,7 +4278,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4293,7 +4293,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4310,7 +4310,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4329,7 +4329,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4350,7 +4350,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4373,7 +4373,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4398,7 +4398,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4411,7 +4411,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ 
-4426,7 +4426,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4443,7 +4443,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4462,7 +4462,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4483,7 +4483,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4506,7 +4506,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4531,7 +4531,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4544,7 +4544,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4559,7 +4559,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4576,7 +4576,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4595,7 +4595,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4639,7 +4639,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4664,7 +4664,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4677,7 +4677,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4692,7 +4692,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4709,7 +4709,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4728,7 +4728,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4772,7 +4772,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4797,7 +4797,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4810,7 +4810,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4825,7 +4825,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4842,7 +4842,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4861,7 +4861,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4882,7 +4882,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4905,7 +4905,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4930,7 +4930,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4943,7 +4943,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4958,7 +4958,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4975,7 +4975,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4994,7 +4994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5015,7 +5015,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5038,7 +5038,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5063,7 +5063,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5076,7 +5076,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5091,7 +5091,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5108,7 +5108,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5127,7 +5127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5148,7 +5148,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5171,7 +5171,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5196,7 +5196,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5209,7 +5209,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5224,7 +5224,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5241,7 +5241,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5260,7 +5260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5281,7 +5281,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5304,7 +5304,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5329,7 +5329,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5342,7 +5342,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5357,7 +5357,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5374,7 +5374,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5414,7 +5414,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5437,7 +5437,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5462,7 +5462,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5475,7 +5475,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5490,7 +5490,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5507,7 +5507,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5547,7 +5547,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5570,7 +5570,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5595,7 +5595,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5608,7 +5608,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5623,7 +5623,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5640,7 +5640,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5659,7 +5659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5680,7 +5680,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5703,7 +5703,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 @@ -5728,7 +5728,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5741,7 +5741,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5756,7 +5756,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5773,7 +5773,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5792,7 +5792,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5813,7 +5813,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5836,7 +5836,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5861,7 +5861,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5874,7 +5874,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5889,7 +5889,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5906,7 +5906,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5925,7 +5925,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5946,7 +5946,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5969,7 +5969,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5994,7 +5994,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6007,7 +6007,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6022,7 +6022,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6039,7 +6039,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6058,7 +6058,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6079,7 +6079,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6102,7 +6102,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6127,7 +6127,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6140,7 +6140,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6155,7 +6155,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6191,7 +6191,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6212,7 +6212,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6235,7 +6235,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6260,7 +6260,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6273,7 +6273,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6288,7 +6288,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6324,7 +6324,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6345,7 +6345,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6368,7 +6368,7 @@ // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6393,7 +6393,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6406,7 +6406,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6421,7 +6421,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6438,7 +6438,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6457,7 +6457,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6478,7 +6478,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6501,7 +6501,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 
@@ // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6723,7 +6723,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6744,7 +6744,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6767,7 +6767,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6792,7 +6792,7 @@ // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64( undef, undef, 
float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6805,7 +6805,7 @@ // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6820,7 +6820,7 @@ // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6837,7 +6837,7 @@ // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6856,7 +6856,7 @@ // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6877,7 +6877,7 @@ // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6900,7 +6900,7 @@ // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , 
} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c @@ -15,7 +15,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -24,7 +24,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -48,7 +48,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -63,7 +63,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -76,7 +76,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -93,7 +93,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -108,7 +108,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -127,7 +127,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -144,7 +144,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -165,7 +165,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -184,7 +184,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vlseg7.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -207,7 +207,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -253,7 +253,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -262,7 +262,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -275,7 +275,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -286,7 +286,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -301,7 +301,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -314,7 +314,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -331,7 +331,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -346,7 +346,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -365,7 +365,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -382,7 +382,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -403,7 +403,7 @@ // 
CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -422,7 +422,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -445,7 +445,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -466,7 +466,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -491,7 +491,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -500,7 +500,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -513,7 +513,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -524,7 +524,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -539,7 +539,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -552,7 +552,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -584,7 +584,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -603,7 +603,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -620,7 +620,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -641,7 +641,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -660,7 +660,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -683,7 +683,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -704,7 +704,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -729,7 +729,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -738,7 +738,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -751,7 +751,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -762,7 +762,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -777,7 +777,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -790,7 +790,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -807,7 +807,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -822,7 +822,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -841,7 +841,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -858,7 +858,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -879,7 +879,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -898,7 +898,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -921,7 +921,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -942,7 +942,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , 
, , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -967,7 +967,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -976,7 +976,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -989,7 +989,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1000,7 +1000,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1015,7 +1015,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1028,7 +1028,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1045,7 +1045,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1054,7 +1054,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1067,7 +1067,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1076,7 +1076,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1089,7 +1089,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1100,7 +1100,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1115,7 +1115,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1128,7 +1128,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1145,7 +1145,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1160,7 +1160,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1179,7 +1179,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1196,7 +1196,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32( undef, undef, undef, 
undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1236,7 +1236,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1259,7 +1259,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1280,7 +1280,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1305,7 +1305,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1314,7 +1314,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1327,7 +1327,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1338,7 +1338,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1353,7 +1353,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1366,7 +1366,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1383,7 +1383,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1398,7 +1398,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1417,7 +1417,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 
@@ -1434,7 +1434,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1455,7 +1455,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1474,7 +1474,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1497,7 +1497,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1518,7 +1518,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1543,7 +1543,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 @@ -1552,7 +1552,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1565,7 +1565,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1576,7 +1576,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1591,7 +1591,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1604,7 +1604,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1621,7 +1621,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1636,7 +1636,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1655,7 +1655,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1672,7 +1672,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1693,7 +1693,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1712,7 +1712,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1735,7 +1735,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1756,7 +1756,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1781,7 +1781,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1790,7 +1790,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1803,7 +1803,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1814,7 +1814,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1829,7 +1829,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1842,7 +1842,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1859,7 +1859,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1868,7 +1868,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1881,7 +1881,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1890,7 +1890,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1903,7 +1903,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1914,7 +1914,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1929,7 +1929,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1942,7 +1942,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1959,7 +1959,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1974,7 +1974,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1993,7 +1993,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2010,7 +2010,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2031,7 +2031,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* 
[[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2050,7 +2050,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2073,7 +2073,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2094,7 +2094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2119,7 +2119,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2128,7 +2128,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2141,7 +2141,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2152,7 +2152,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2167,7 +2167,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2180,7 +2180,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2197,7 +2197,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2212,7 +2212,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2231,7 +2231,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2248,7 +2248,7 @@ // // 
CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2269,7 +2269,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2288,7 +2288,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2311,7 +2311,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2332,7 +2332,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2357,7 +2357,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2366,7 +2366,7 @@ // // 
CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2379,7 +2379,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2390,7 +2390,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2405,7 +2405,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2418,7 +2418,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2435,7 +2435,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2444,7 +2444,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2457,7 +2457,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2466,7 +2466,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2479,7 +2479,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2490,7 +2490,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2505,7 +2505,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2518,7 +2518,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2535,7 +2535,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2550,7 +2550,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2569,7 +2569,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2586,7 +2586,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2607,7 +2607,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2626,7 +2626,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2649,7 +2649,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2670,7 +2670,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2695,7 +2695,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2704,7 +2704,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2717,7 +2717,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2728,7 +2728,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2743,7 +2743,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32( 
undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2756,7 +2756,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2773,7 +2773,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2782,7 +2782,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2795,7 +2795,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2804,7 +2804,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2817,7 +2817,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2828,7 +2828,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2843,7 +2843,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2856,7 +2856,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2873,7 +2873,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2888,7 +2888,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2907,7 +2907,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2924,7 +2924,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlseg6.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2945,7 +2945,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2964,7 +2964,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2987,7 +2987,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3008,7 +3008,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3033,7 +3033,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3042,7 +3042,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64( undef, undef, 
i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3055,7 +3055,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3066,7 +3066,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3081,7 +3081,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3094,7 +3094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3111,7 +3111,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3126,7 +3126,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3145,7 
+3145,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3162,7 +3162,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3183,7 +3183,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3202,7 +3202,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3225,7 +3225,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 1 @@ -3271,7 +3271,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3280,7 +3280,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3293,7 +3293,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3304,7 +3304,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3319,7 +3319,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3332,7 +3332,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3349,7 +3349,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32( undef, undef, 
undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3364,7 +3364,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3383,7 +3383,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3400,7 +3400,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3421,7 +3421,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3440,7 +3440,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3463,7 +3463,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* 
[[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3484,7 +3484,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3509,7 +3509,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3518,7 +3518,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3531,7 +3531,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3542,7 +3542,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3557,7 +3557,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3570,7 +3570,7 @@ // // 
CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3587,7 +3587,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3602,7 +3602,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3621,7 +3621,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3638,7 +3638,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3659,7 +3659,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3678,7 +3678,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3701,7 +3701,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3722,7 +3722,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3747,7 +3747,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3756,7 +3756,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3769,7 +3769,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3780,7 +3780,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3795,7 +3795,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3808,7 +3808,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3825,7 +3825,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3834,7 +3834,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3847,7 +3847,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3856,7 +3856,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3869,7 +3869,7 @@ // CHECK-RV32-LABEL: 
@test_vlseg3e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3880,7 +3880,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3895,7 +3895,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3908,7 +3908,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3925,7 +3925,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3940,7 +3940,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3959,7 +3959,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3976,7 +3976,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3997,7 +3997,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4016,7 +4016,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4039,7 +4039,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4060,7 +4060,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4085,7 +4085,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4094,7 +4094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4118,7 +4118,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4133,7 +4133,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4146,7 +4146,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4163,7 +4163,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4178,7 +4178,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4197,7 +4197,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4214,7 +4214,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4235,7 +4235,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4254,7 +4254,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4277,7 +4277,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4298,7 +4298,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4323,7 +4323,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4332,7 +4332,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4345,7 +4345,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4356,7 +4356,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4371,7 +4371,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4384,7 +4384,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4401,7 +4401,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4416,7 +4416,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4435,7 +4435,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4452,7 +4452,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4473,7 +4473,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4492,7 +4492,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4515,7 +4515,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4536,7 +4536,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4561,7 +4561,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4570,7 +4570,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4583,7 +4583,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4594,7 +4594,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4609,7 +4609,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4622,7 +4622,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4639,7 +4639,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4648,7 +4648,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4661,7 +4661,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4670,7 +4670,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4683,7 +4683,7 @@ // 
CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4694,7 +4694,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4709,7 +4709,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4722,7 +4722,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4739,7 +4739,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4754,7 +4754,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4773,7 +4773,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4790,7 +4790,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4811,7 +4811,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4830,7 +4830,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4853,7 +4853,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4874,7 +4874,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4899,7 +4899,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4908,7 +4908,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4921,7 +4921,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4932,7 +4932,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4947,7 +4947,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4960,7 +4960,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4977,7 +4977,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4992,7 +4992,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5011,7 +5011,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5028,7 +5028,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5049,7 +5049,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5068,7 +5068,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5091,7 +5091,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5112,7 +5112,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5137,7 +5137,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5146,7 +5146,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5159,7 +5159,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5170,7 +5170,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5185,7 +5185,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5198,7 +5198,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5215,7 +5215,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5224,7 +5224,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5237,7 +5237,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5246,7 +5246,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5259,7 +5259,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5270,7 +5270,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5285,7 +5285,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5298,7 +5298,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5315,7 +5315,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5330,7 +5330,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5349,7 +5349,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5366,7 +5366,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 @@ -5387,7 +5387,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5406,7 +5406,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5429,7 +5429,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5450,7 +5450,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5475,7 +5475,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5484,7 +5484,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5497,7 +5497,7 @@ // CHECK-RV32-LABEL: 
@test_vlseg3e64_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5508,7 +5508,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5523,7 +5523,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5536,7 +5536,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5553,7 +5553,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5562,7 +5562,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5575,7 +5575,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5584,7 +5584,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5597,7 +5597,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i32( undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5608,7 +5608,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5623,7 +5623,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i32( undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5636,7 +5636,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5653,7 +5653,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i32( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5668,7 +5668,7 @@ // // 
CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5687,7 +5687,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i32( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5704,7 +5704,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5725,7 +5725,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i32( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5744,7 +5744,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5767,7 +5767,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i32( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 1 @@ -5788,7 +5788,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5813,7 +5813,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5822,7 +5822,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5835,7 +5835,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i32( undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5846,7 +5846,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5861,7 +5861,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i32( undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5874,7 +5874,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64(float* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5891,7 +5891,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i32( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5906,7 +5906,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5925,7 +5925,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i32( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5942,7 +5942,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5963,7 +5963,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i32( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5982,7 +5982,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6005,7 +6005,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i32( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6026,7 +6026,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6051,7 +6051,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6060,7 +6060,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6073,7 +6073,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i32( undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6084,7 +6084,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64( undef, undef, undef, 
float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6099,7 +6099,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i32( undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6112,7 +6112,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6129,7 +6129,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6138,7 +6138,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6151,7 +6151,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6160,7 +6160,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6173,7 +6173,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i32( undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6184,7 +6184,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i64( undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6199,7 +6199,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i32( undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6212,7 +6212,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6229,7 +6229,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i32( undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6244,7 +6244,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6263,7 +6263,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i32( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6280,7 +6280,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6301,7 +6301,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i32( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6320,7 +6320,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6343,7 +6343,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i32( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6364,7 +6364,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6389,7 +6389,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlseg2.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6398,7 +6398,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6411,7 +6411,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i32( undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6422,7 +6422,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i64( undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6437,7 +6437,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i32( undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6450,7 +6450,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6467,7 +6467,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 
0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6476,7 +6476,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6489,7 +6489,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6498,7 +6498,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6511,7 +6511,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6522,7 +6522,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6537,7 +6537,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6550,7 +6550,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6567,7 +6567,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6582,7 +6582,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6601,7 +6601,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6618,7 +6618,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6639,7 +6639,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6658,7 +6658,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6681,7 +6681,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6702,7 +6702,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6727,7 +6727,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6736,7 +6736,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6749,7 +6749,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6760,7 +6760,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64( undef, 
undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6775,7 +6775,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6788,7 +6788,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6805,7 +6805,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6820,7 +6820,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6839,7 +6839,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6856,7 +6856,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6877,7 +6877,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6896,7 +6896,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6919,7 +6919,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6940,7 +6940,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6965,7 +6965,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6974,7 +6974,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6987,7 +6987,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6998,7 +6998,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7013,7 +7013,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7026,7 +7026,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7043,7 +7043,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7058,7 +7058,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7077,7 +7077,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7094,7 +7094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7115,7 +7115,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7134,7 +7134,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7157,7 +7157,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7178,7 +7178,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7203,7 
+7203,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7212,7 +7212,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7225,7 +7225,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7236,7 +7236,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7251,7 +7251,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7264,7 +7264,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7281,7 +7281,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32( undef, 
undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7290,7 +7290,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c @@ -15,7 +15,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -26,7 +26,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -41,7 +41,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -54,7 +54,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -71,7 +71,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32( undef, undef, undef, undef, i8* 
[[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -86,7 +86,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -105,7 +105,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -122,7 +122,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -143,7 +143,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -162,7 +162,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -185,7 +185,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32( 
undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -206,7 +206,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -231,7 +231,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -254,7 +254,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -281,7 +281,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -292,7 +292,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -307,7 +307,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -320,7 +320,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -337,7 +337,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -352,7 +352,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -371,7 +371,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -388,7 +388,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -409,7 +409,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } 
@llvm.riscv.vlseg6ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -428,7 +428,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -451,7 +451,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -472,7 +472,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -497,7 +497,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -520,7 +520,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -547,7 +547,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -558,7 +558,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -573,7 +573,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -586,7 +586,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -603,7 +603,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -618,7 +618,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -637,7 +637,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -654,7 +654,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -675,7 +675,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -694,7 +694,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -717,7 +717,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -738,7 +738,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -763,7 +763,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -786,7 +786,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -813,7 +813,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -824,7 +824,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -839,7 +839,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -852,7 +852,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -869,7 +869,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } 
@llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -884,7 +884,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -903,7 +903,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -920,7 +920,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -941,7 +941,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -960,7 +960,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -983,7 +983,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -1004,7 +1004,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1029,7 +1029,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -1052,7 +1052,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1079,7 +1079,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1090,7 +1090,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1105,7 +1105,7 @@ // CHECK-RV32-LABEL: 
@test_vlseg3e8ff_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -1118,7 +1118,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1135,7 +1135,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -1150,7 +1150,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1169,7 +1169,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1180,7 +1180,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1195,7 +1195,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1206,7 +1206,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1221,7 +1221,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1251,7 +1251,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -1266,7 +1266,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1285,7 +1285,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -1302,7 +1302,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1323,7 +1323,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -1342,7 +1342,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1365,7 +1365,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -1386,7 +1386,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1411,7 +1411,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -1434,7 +1434,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1461,7 +1461,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1472,7 +1472,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1487,7 +1487,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -1500,7 +1500,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -1532,7 +1532,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1551,7 +1551,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -1568,7 +1568,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1589,7 +1589,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -1608,7 +1608,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1631,7 +1631,7 @@ // 
CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -1652,7 +1652,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1677,7 +1677,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -1700,7 +1700,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1727,7 +1727,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -1738,7 +1738,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1753,7 +1753,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -1766,7 +1766,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1783,7 +1783,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -1798,7 +1798,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1817,7 +1817,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -1834,7 +1834,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1855,7 +1855,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -1874,7 +1874,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1897,7 +1897,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -1918,7 +1918,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1943,7 +1943,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -1966,7 +1966,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64( 
undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1993,7 +1993,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2004,7 +2004,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2019,7 +2019,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -2032,7 +2032,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2049,7 +2049,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -2064,7 +2064,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2083,7 +2083,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2094,7 +2094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2109,7 +2109,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2120,7 +2120,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2135,7 +2135,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -2148,7 +2148,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , i64 } [[TMP0]], 1 @@ -2165,7 +2165,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -2180,7 +2180,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2199,7 +2199,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -2216,7 +2216,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2237,7 +2237,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -2256,7 +2256,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2279,7 +2279,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -2300,7 +2300,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2325,7 +2325,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -2348,7 +2348,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -2375,7 +2375,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2386,7 +2386,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64( undef, undef, 
i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2401,7 +2401,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -2414,7 +2414,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2431,7 +2431,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -2446,7 +2446,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2465,7 +2465,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -2482,7 +2482,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2503,7 +2503,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -2522,7 +2522,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2545,7 +2545,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -2566,7 +2566,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2591,7 +2591,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -2614,7 +2614,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -2641,7 +2641,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2652,7 +2652,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2667,7 +2667,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -2680,7 +2680,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2697,7 +2697,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -2712,7 +2712,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2731,7 +2731,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2742,7 +2742,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2757,7 +2757,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -2768,7 +2768,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2783,7 +2783,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -2796,7 +2796,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2813,7 +2813,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -2828,7 +2828,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2847,7 +2847,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -2864,7 +2864,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2885,7 +2885,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -2904,7 +2904,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* 
[[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2927,7 +2927,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -2948,7 +2948,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2973,7 +2973,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -2996,7 +2996,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3023,7 +3023,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3034,7 +3034,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3049,7 +3049,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3062,7 +3062,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3079,7 +3079,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3094,7 +3094,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3113,7 +3113,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3124,7 +3124,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64( 
undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3139,7 +3139,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3150,7 +3150,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3165,7 +3165,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3178,7 +3178,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3195,7 +3195,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3210,7 +3210,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3229,7 +3229,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3267,7 +3267,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3286,7 +3286,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3309,7 +3309,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3330,7 +3330,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3355,7 +3355,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3378,7 +3378,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3405,7 +3405,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3416,7 +3416,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3431,7 +3431,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3444,7 +3444,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3461,7 +3461,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3476,7 +3476,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3495,7 +3495,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3512,7 +3512,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3533,7 +3533,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3552,7 +3552,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* 
[[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3575,7 +3575,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3596,7 +3596,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3621,7 +3621,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3644,7 +3644,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3671,7 +3671,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3682,7 +3682,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3697,7 +3697,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3710,7 +3710,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3727,7 +3727,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -3742,7 +3742,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3761,7 +3761,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -3778,7 +3778,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3799,7 +3799,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -3818,7 +3818,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3841,7 +3841,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -3862,7 +3862,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3887,7 +3887,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -3910,7 +3910,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3937,7 +3937,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -3948,7 +3948,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3963,7 +3963,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -3976,7 +3976,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3993,7 +3993,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4008,7 +4008,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* 
[[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4027,7 +4027,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv8i8.i32( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4044,7 +4044,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4065,7 +4065,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4128,7 +4128,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4153,7 +4153,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv8i8.i32( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -4176,7 +4176,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4203,7 +4203,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4214,7 +4214,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4229,7 +4229,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv16i8.i32( undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -4242,7 +4242,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4259,7 +4259,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv16i8.i32( undef, undef, undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4274,7 +4274,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4293,7 +4293,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv32i8.i32( undef, undef, i8* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4304,7 +4304,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4319,7 +4319,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4330,7 +4330,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4345,7 +4345,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -4358,7 +4358,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4375,7 +4375,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4390,7 +4390,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4409,7 +4409,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4426,7 +4426,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4447,7 +4447,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4466,7 +4466,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4489,7 +4489,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4510,7 +4510,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4535,7 +4535,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -4558,7 +4558,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4585,7 +4585,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4596,7 +4596,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4611,7 +4611,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -4624,7 +4624,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4641,7 +4641,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4656,7 +4656,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4675,7 +4675,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4692,7 +4692,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4713,7 +4713,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4732,7 +4732,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4755,7 +4755,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -4776,7 
+4776,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4801,7 +4801,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -4824,7 +4824,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4851,7 +4851,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -4862,7 +4862,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4877,7 +4877,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -4890,7 +4890,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4907,7 +4907,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -4922,7 +4922,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4941,7 +4941,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4i16.i32( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -4958,7 +4958,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4979,7 +4979,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -4998,7 +4998,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -5021,7 +5021,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -5042,7 +5042,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -5067,7 +5067,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4i16.i32( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -5090,7 +5090,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -5117,7 +5117,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.nxv8i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5128,7 +5128,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5143,7 +5143,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8i16.i32( undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -5156,7 +5156,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5173,7 +5173,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8i16.i32( undef, undef, undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -5188,7 +5188,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5207,7 +5207,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16i16.i32( undef, undef, i16* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5218,7 +5218,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5233,7 +5233,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5244,7 +5244,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5259,7 +5259,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -5272,7 +5272,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5289,7 +5289,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , i32 } [[TMP0]], 1 @@ -5304,7 +5304,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5323,7 +5323,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -5340,7 +5340,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -5361,7 +5361,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -5380,7 +5380,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -5403,7 +5403,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -5424,7 +5424,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -5449,7 +5449,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -5472,7 +5472,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -5499,7 +5499,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5510,7 +5510,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5525,7 +5525,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -5538,7 +5538,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5555,7 +5555,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -5570,7 +5570,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5589,7 +5589,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2i32.i32( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -5606,7 +5606,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -5627,7 +5627,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -5646,7 +5646,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -5669,7 +5669,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -5690,7 +5690,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -5715,7 +5715,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2i32.i32( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -5738,7 +5738,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -5765,7 +5765,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5776,7 +5776,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5791,7 +5791,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4i32.i32( undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -5804,7 +5804,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5821,7 +5821,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4i32.i32( undef, undef, undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -5836,7 +5836,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5855,7 +5855,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 
} @llvm.riscv.vlseg2ff.nxv8i32.i32( undef, undef, i32* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5866,7 +5866,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5881,7 +5881,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -5892,7 +5892,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5907,7 +5907,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -5920,7 +5920,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5937,7 +5937,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } 
[[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -5952,7 +5952,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5971,7 +5971,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1i64.i32( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -5988,7 +5988,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6009,7 +6009,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -6028,7 +6028,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6051,7 +6051,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, i64* 
[[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -6072,7 +6072,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6097,7 +6097,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1i64.i32( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6120,7 +6120,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6147,7 +6147,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6158,7 +6158,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6173,7 +6173,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2i64.i32( undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6186,7 +6186,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6203,7 +6203,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2i64.i32( undef, undef, undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6218,7 +6218,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6237,7 +6237,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4i64.i32( undef, undef, i64* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6248,7 +6248,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6263,7 +6263,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6274,7 +6274,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6289,7 +6289,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f32.i32( undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6302,7 +6302,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6319,7 +6319,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f32.i32( undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6334,7 +6334,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6353,7 +6353,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f32.i32( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , 
i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -6370,7 +6370,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6391,7 +6391,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f32.i32( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -6410,7 +6410,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6433,7 +6433,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f32.i32( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -6454,7 +6454,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6479,7 +6479,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f32.i32( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6502,7 +6502,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6529,7 +6529,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6540,7 +6540,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6555,7 +6555,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f32.i32( undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6568,7 +6568,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6585,7 +6585,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f32.i32( undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6600,7 +6600,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6619,7 +6619,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f32.i32( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -6636,7 +6636,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6657,7 +6657,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f32.i32( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -6676,7 +6676,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6699,7 +6699,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , 
, , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f32.i32( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -6720,7 +6720,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6745,7 +6745,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f32.i32( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -6768,7 +6768,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6795,7 +6795,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f32.i32( undef, undef, float* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6806,7 +6806,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 
@@ -6821,7 +6821,7 @@
 // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } @llvm.riscv.vlseg3ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } @llvm.riscv.vlseg3ff.nxv4f32.i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, float* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } [[TMP0]], 1
@@ -6834,7 +6834,7 @@
 //
 // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg3ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg3ff.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, float* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
@@ -6851,7 +6851,7 @@
 // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } @llvm.riscv.vlseg4ff.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } @llvm.riscv.vlseg4ff.nxv4f32.i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, float* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32 } [[TMP0]], 1
@@ -6866,7 +6866,7 @@
 //
 // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg4ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg4ff.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, float* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
@@ -6885,7 +6885,7 @@
 // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i32 } @llvm.riscv.vlseg2ff.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i32 } @llvm.riscv.vlseg2ff.nxv8f32.i32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, float* [[BASE:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i32 } [[TMP0]], 0
 // CHECK-RV32-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i32 } [[TMP0]], 1
@@ -6896,7 +6896,7 @@
 //
 // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i64 } @llvm.riscv.vlseg2ff.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i64 } @llvm.riscv.vlseg2ff.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> undef, float* [[BASE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 1
@@ -6911,7 +6911,7 @@
 // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1(
 // CHECK-RV32-NEXT:
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -6922,7 +6922,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6937,7 +6937,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f64.i32( undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -6950,7 +6950,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f64.i64( undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6967,7 +6967,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f64.i32( undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -6982,7 +6982,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7001,7 +7001,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } 
@llvm.riscv.vlseg5ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f64.i32( undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7018,7 +7018,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7039,7 +7039,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f64.i32( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7058,7 +7058,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7081,7 +7081,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f64.i32( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -7102,7 +7102,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , , , i64 } [[TMP0]], 1 @@ -7127,7 +7127,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f64.i32( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -7150,7 +7150,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7177,7 +7177,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7188,7 +7188,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7203,7 +7203,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f64.i32( undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7216,7 +7216,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f64.i64( undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7233,7 +7233,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f64.i32( undef, undef, undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7248,7 +7248,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7267,7 +7267,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f64.i32( undef, undef, double* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7278,7 +7278,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7293,7 +7293,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv1f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7304,7 +7304,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7319,7 +7319,7 @@ // CHECK-RV32-LABEL: 
@test_vlseg3e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv1f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7332,7 +7332,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7349,7 +7349,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv1f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7364,7 +7364,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7383,7 +7383,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv1f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7400,7 +7400,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7421,7 +7421,7 @@ // CHECK-RV32-LABEL: 
@test_vlseg6e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv1f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7440,7 +7440,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7463,7 +7463,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv1f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -7484,7 +7484,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -7509,7 +7509,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv1f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -7532,7 +7532,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7559,7 +7559,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv2f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7570,7 +7570,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7585,7 +7585,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv2f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7598,7 +7598,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7615,7 +7615,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv2f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7630,7 +7630,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7649,7 +7649,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv2f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7666,7 +7666,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7687,7 +7687,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv2f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7706,7 +7706,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7729,7 +7729,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv2f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -7750,7 +7750,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f16.i64( undef, 
undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -7775,7 +7775,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv2f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -7798,7 +7798,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7825,7 +7825,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv4f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7836,7 +7836,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7851,7 +7851,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv4f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7864,7 +7864,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7881,7 +7881,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv4f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7896,7 +7896,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7915,7 +7915,7 @@ // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.nxv4f16.i32( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7932,7 +7932,7 @@ // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7953,7 +7953,7 @@ // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.nxv4f16.i32( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7972,7 +7972,7 @@ // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7995,7 +7995,7 @@ // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.nxv4f16.i32( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -8016,7 +8016,7 @@ // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -8041,7 +8041,7 @@ // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_f16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.nxv4f16.i32( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -8064,7 +8064,7 @@ // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -8091,7 +8091,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv8f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8102,7 +8102,7 @@ // // 
CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -8117,7 +8117,7 @@ // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.nxv8f16.i32( undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -8130,7 +8130,7 @@ // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -8147,7 +8147,7 @@ // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_f16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.nxv8f16.i32( undef, undef, undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -8162,7 +8162,7 @@ // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -8181,7 +8181,7 @@ // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_f16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.nxv16f16.i32( undef, undef, half* [[BASE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8192,7 +8192,7 @@ // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -275,7 +275,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( undef, undef, undef, undef, 
i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -541,7 +541,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -554,7 +554,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -599,7 +599,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -612,7 +612,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -627,7 +627,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -644,7 +644,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -663,7 +663,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -684,7 +684,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -707,7 +707,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -732,7 +732,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 @@ -745,7 +745,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -760,7 +760,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -777,7 +777,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -796,7 +796,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -817,7 +817,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -840,7 +840,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -865,7 +865,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -910,7 +910,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -929,7 +929,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -950,7 +950,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( undef, undef, 
undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -973,7 +973,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -998,7 +998,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1011,7 +1011,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1043,7 +1043,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1056,7 +1056,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1069,7 +1069,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1120,7 +1120,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1141,7 +1141,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1164,7 +1164,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1189,7 +1189,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1202,7 +1202,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1253,7 +1253,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1274,7 +1274,7 @@ // CHECK-RV64-LABEL: 
@test_vlsseg7e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1297,7 +1297,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1322,7 +1322,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1335,7 +1335,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1350,7 +1350,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1380,7 +1380,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1393,7 +1393,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1425,7 +1425,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1444,7 +1444,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1465,7 +1465,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1488,7 +1488,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1513,7 +1513,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1526,7 +1526,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1541,7 +1541,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1558,7 +1558,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1571,7 +1571,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1584,7 +1584,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1616,7 +1616,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1635,7 +1635,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1656,7 +1656,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1679,7 +1679,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( undef, undef, 
undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1704,7 +1704,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1717,7 +1717,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1732,7 +1732,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1749,7 +1749,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1768,7 +1768,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1812,7 +1812,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1837,7 +1837,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1850,7 +1850,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1865,7 +1865,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1882,7 +1882,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1901,7 +1901,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1922,7 +1922,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1945,7 +1945,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1970,7 +1970,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1983,7 +1983,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1998,7 +1998,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2015,7 +2015,7 @@ // 
CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2034,7 +2034,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2055,7 +2055,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2078,7 +2078,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2103,7 +2103,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2116,7 +2116,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
<vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -2131,7 +2131,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -2148,7 +2148,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
@@ -2161,7 +2161,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -2174,7 +2174,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -2189,7 +2189,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -2206,7 +2206,7 @@
 
 // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>*
[[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2225,7 +2225,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2246,7 +2246,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2269,7 +2269,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2294,7 +2294,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2307,7 +2307,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2322,7 +2322,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( undef, undef, undef, undef, i16* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2339,7 +2339,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2358,7 +2358,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2379,7 +2379,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2402,7 +2402,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2427,7 +2427,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2440,7 +2440,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2455,7 +2455,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2472,7 +2472,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2491,7 +2491,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2512,7 +2512,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2535,7 +2535,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2560,7 +2560,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2573,7 +2573,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2588,7 +2588,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2605,7 +2605,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2618,7 +2618,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2631,7 +2631,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2646,7 +2646,7 @@ 
// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2663,7 +2663,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2682,7 +2682,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2703,7 +2703,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2726,7 +2726,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2751,7 +2751,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2764,7 +2764,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2779,7 +2779,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2796,7 +2796,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2815,7 +2815,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2836,7 +2836,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2859,7 +2859,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2884,7 +2884,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2897,7 +2897,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2912,7 +2912,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2929,7 +2929,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2942,7 +2942,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2955,7 +2955,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2970,7 +2970,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2987,7 +2987,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3006,7 +3006,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3027,7 +3027,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3050,7 +3050,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3075,7 +3075,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3088,7 +3088,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3103,7 +3103,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3120,7 +3120,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3133,7 +3133,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3146,7 +3146,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3161,7 +3161,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3178,7 +3178,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3197,7 +3197,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3218,7 +3218,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3241,7 +3241,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3266,7 +3266,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3279,7 +3279,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3294,7 +3294,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3311,7 +3311,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3330,7 +3330,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3351,7 +3351,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3374,7 +3374,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( undef, undef, undef, undef, undef, undef, undef, 
undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3399,7 +3399,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3412,7 +3412,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3427,7 +3427,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3444,7 +3444,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( undef, undef, float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3457,7 +3457,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3470,7 +3470,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( undef, undef, undef, double* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3485,7 +3485,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3502,7 +3502,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3521,7 +3521,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3542,7 +3542,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3565,7 +3565,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3590,7 +3590,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3603,7 +3603,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3618,7 +3618,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3635,7 +3635,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( undef, undef, double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7287,7 +7287,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7300,7 +7300,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7315,7 +7315,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7332,7 +7332,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7351,7 +7351,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7372,7 +7372,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7395,7 +7395,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7420,7 +7420,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7433,7 +7433,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7448,7 +7448,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7465,7 +7465,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7484,7 +7484,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7505,7 +7505,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7528,7 +7528,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7553,7 +7553,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7566,7 +7566,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7581,7 +7581,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7598,7 +7598,7 @@ // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7617,7 +7617,7 @@ // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7638,7 +7638,7 @@ // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7661,7 +7661,7 @@ // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7686,7 +7686,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7699,7 +7699,7 @@ // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7714,7 +7714,7 @@ // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7731,7 +7731,7 @@ // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( undef, undef, half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -200,7 +200,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -213,7 +213,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -245,7 +245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -264,7 +264,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -285,7 +285,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -333,7 +333,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -361,7 +361,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -391,7 +391,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -455,7 +455,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -524,7 +524,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -537,7 +537,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -552,7 +552,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -582,7 +582,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -597,7 +597,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -614,7 +614,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -633,7 +633,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -654,7 +654,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,7 +677,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -715,7 +715,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -730,7 +730,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -747,7 +747,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -766,7 +766,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -787,7 +787,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -810,7 +810,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -893,7 +893,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -906,7 +906,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -921,7 +921,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -957,7 +957,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1001,7 +1001,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1026,7 +1026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ 
-1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1054,7 +1054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1071,7 +1071,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1084,7 +1084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1112,7 +1112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , } [[TMP0]], 1 @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1217,7 +1217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1230,7 +1230,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1245,7 +1245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1262,7 +1262,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1275,7 +1275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1320,7 +1320,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1360,7 +1360,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1383,7 +1383,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1436,7 +1436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1453,7 +1453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1481,7 +1481,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 1 @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1561,7 +1561,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1586,7 +1586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1614,7 +1614,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1631,7 +1631,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1644,7 +1644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1657,7 +1657,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1752,7 +1752,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1777,7 +1777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1805,7 +1805,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1822,7 +1822,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1848,7 +1848,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1863,7 +1863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1920,7 +1920,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1968,7 +1968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1981,7 +1981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2013,7 +2013,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2026,7 +2026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2039,7 +2039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2054,7 +2054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2071,7 +2071,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2090,7 +2090,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2111,7 +2111,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2134,7 +2134,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2172,7 +2172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2187,7 +2187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2204,7 +2204,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2217,7 +2217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2230,7 +2230,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2262,7 +2262,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2281,7 +2281,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2302,7 +2302,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2325,7 +2325,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2350,7 
+2350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2363,7 +2363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2395,7 +2395,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2408,7 +2408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2421,7 +2421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2436,7 +2436,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2453,7 +2453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2472,7 +2472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2493,7 +2493,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2516,7 +2516,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2541,7 +2541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2554,7 +2554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2569,7 +2569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2586,7 +2586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2599,7 +2599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2612,7 +2612,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2627,7 +2627,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2663,7 +2663,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2684,7 +2684,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2707,7 +2707,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2732,7 +2732,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2745,7 +2745,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2760,7 +2760,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2790,7 +2790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2803,7 +2803,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2818,7 +2818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2835,7 +2835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2854,7 +2854,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2875,7 +2875,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2898,7 +2898,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2923,7 +2923,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2936,7 +2936,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2951,7 +2951,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2968,7 +2968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2981,7 +2981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2994,7 +2994,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3009,7 +3009,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3026,7 +3026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3045,7 +3045,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3066,7 +3066,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3089,7 +3089,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3114,7 +3114,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3127,7 +3127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3142,7 +3142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv16i8.nxv16i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3159,7 +3159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3172,7 +3172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3185,7 +3185,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3200,7 +3200,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3217,7 +3217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3236,7 +3236,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3257,7 +3257,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3280,7 +3280,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3305,7 +3305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3318,7 +3318,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3333,7 +3333,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3350,7 +3350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv32i8.nxv32i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3363,7 +3363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3376,7 +3376,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3391,7 +3391,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3408,7 +3408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3427,7 +3427,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3448,7 
+3448,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3471,7 +3471,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3496,7 +3496,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i8.nxv16i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3509,7 +3509,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv16i8.nxv16i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3524,7 +3524,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv16i8.nxv16i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3541,7 +3541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i8.nxv8i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3554,7 +3554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i8.nxv8i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3569,7 +3569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3586,7 +3586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3605,7 +3605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3626,7 +3626,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3649,7 +3649,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv8i8.nxv8i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3674,7 +3674,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3687,7 +3687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3702,7 +3702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3719,7 +3719,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3738,7 +3738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3759,7 +3759,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3782,7 +3782,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3807,7 +3807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3820,7 +3820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3835,7 +3835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3852,7 +3852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3865,7 +3865,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3878,7 +3878,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3893,7 +3893,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3910,7 +3910,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3950,7 +3950,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3973,7 +3973,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3998,7 +3998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4011,7 +4011,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4026,7 +4026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4043,7 +4043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4056,7 +4056,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4069,7 +4069,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4101,7 +4101,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4120,7 +4120,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4141,7 +4141,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4164,7 +4164,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64(i16* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4189,7 +4189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4202,7 +4202,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4234,7 +4234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16i16.nxv16i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4247,7 +4247,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i16.nxv4i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4260,7 +4260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i16.nxv4i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4275,7 +4275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4292,7 +4292,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4311,7 +4311,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4332,7 +4332,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4355,7 +4355,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4380,7 +4380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i16.nxv8i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4393,7 +4393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8i16.nxv8i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4408,7 +4408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8i16.nxv8i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4425,7 +4425,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4438,7 +4438,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4453,7 +4453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4470,7 +4470,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4489,7 +4489,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4510,7 +4510,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4533,7 +4533,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4558,7 +4558,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4571,7 +4571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4586,7 +4586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4603,7 +4603,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4629,7 +4629,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4644,7 +4644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4661,7 +4661,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4680,7 +4680,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4701,7 +4701,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4724,7 +4724,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4762,7 +4762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4777,7 +4777,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4794,7 +4794,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4807,7 +4807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4820,7 +4820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4835,7 +4835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4852,7 +4852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 @@ -4871,7 +4871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4892,7 +4892,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4915,7 +4915,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4940,7 +4940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4953,7 +4953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4968,7 +4968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i32.i64( undef, undef, undef, undef, i32* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4985,7 +4985,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4998,7 +4998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i32.nxv2i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5011,7 +5011,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i32.nxv2i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5026,7 +5026,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5043,7 +5043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5062,7 +5062,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5083,7 +5083,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5106,7 +5106,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,7 +5131,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i32.nxv4i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5144,7 +5144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i32.nxv4i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5159,7 +5159,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i32.nxv4i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5176,7 +5176,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8i32.nxv8i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5189,7 +5189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5202,7 +5202,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5217,7 +5217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5234,7 +5234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5253,7 +5253,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5274,7 +5274,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5297,7 +5297,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5322,7 +5322,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5335,7 +5335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i8.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5350,7 +5350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i8.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5367,7 +5367,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i8.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5380,7 +5380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5408,7 +5408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5425,7 +5425,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5444,7 +5444,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5465,7 +5465,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i64* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5488,7 +5488,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5513,7 +5513,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i16.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5541,7 +5541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i16.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5558,7 +5558,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i16.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5571,7 +5571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5584,7 +5584,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5599,7 +5599,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5616,7 +5616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5635,7 +5635,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5656,7 +5656,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5679,7 +5679,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5704,7 +5704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5717,7 +5717,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i32.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5732,7 +5732,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i32.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5749,7 +5749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i32.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5762,7 +5762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i64.nxv1i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5775,7 +5775,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i64.nxv1i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5790,7 +5790,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5807,7 +5807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5826,7 +5826,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5847,7 +5847,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5870,7 +5870,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5895,7 +5895,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i64.nxv2i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5908,7 +5908,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i64.nxv2i64.i64( undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5923,7 +5923,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i64.nxv2i64.i64( undef, undef, undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5940,7 +5940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64(i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i64.nxv4i64.i64( undef, undef, i64* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5953,7 +5953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5966,7 +5966,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64( undef, undef, undef, float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5981,7 +5981,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5998,7 +5998,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6017,7 +6017,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6038,7 +6038,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6061,7 +6061,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6086,7 +6086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6099,7 +6099,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6114,7 +6114,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6131,7 +6131,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6144,7 +6144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6157,7 +6157,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6189,7 +6189,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6208,7 +6208,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6229,7 +6229,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6252,7 +6252,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6277,7 +6277,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6290,7 +6290,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6322,7 +6322,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6335,7 +6335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6348,7 +6348,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6363,7 +6363,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6380,7 +6380,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6399,7 +6399,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6420,7 +6420,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6443,7 +6443,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6468,7 +6468,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6481,7 +6481,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6496,7 +6496,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6513,7 +6513,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6717,7 +6717,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6730,7 +6730,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6745,7 +6745,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6762,7 +6762,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6781,7 +6781,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6802,7 +6802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6825,7 +6825,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6850,7 +6850,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6863,7 +6863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6878,7 +6878,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6895,7 +6895,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6908,7 +6908,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6921,7 +6921,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6936,7 +6936,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6953,7 +6953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6972,7 +6972,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6993,7 +6993,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7016,7 +7016,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7041,7 +7041,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7054,7 +7054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7069,7 +7069,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7086,7 +7086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7099,7 +7099,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7112,7 +7112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7127,7 +7127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7144,7 +7144,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7163,7 +7163,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7184,7 +7184,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7207,7 +7207,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7232,7 +7232,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7245,7 +7245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7260,7 +7260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7277,7 +7277,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg2ei32_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7290,7 +7290,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7303,7 +7303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7318,7 +7318,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7335,7 +7335,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7354,7 +7354,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7375,7 +7375,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7398,7 +7398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7423,7 +7423,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64( undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7436,7 +7436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64( undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7451,7 +7451,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64( undef, undef, undef, undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7468,7 +7468,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64( undef, 
undef, double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask.c
@@ -7481,7 +7481,7 @@
// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
@@ -7494,7 +7494,7 @@
// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -7509,7 +7509,7 @@
// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -7526,7 +7526,7 @@
// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -7545,7 +7545,7 @@
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@
-7566,7 +7566,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7589,7 +7589,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7614,7 +7614,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7627,7 +7627,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7642,7 +7642,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7659,7 +7659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7672,7 +7672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7685,7 +7685,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7700,7 +7700,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7717,7 +7717,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7736,7 +7736,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7757,7 +7757,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64( undef, 
undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7780,7 +7780,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7805,7 +7805,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7818,7 +7818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7833,7 +7833,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7850,7 +7850,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7863,7 +7863,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7876,7 +7876,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7891,7 +7891,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7908,7 +7908,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7927,7 +7927,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7948,7 +7948,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7971,7 +7971,7 @@ 
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7996,7 +7996,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -8009,7 +8009,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -8024,7 +8024,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -8041,7 +8041,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -8054,7 +8054,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -8067,7 +8067,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -8082,7 +8082,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -8099,7 +8099,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -8118,7 +8118,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -8139,7 +8139,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -8162,7 +8162,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64( 
undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -8187,7 +8187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -8200,7 +8200,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -8215,7 +8215,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c @@ -6912,7 +6912,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6925,7 +6925,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ 
-6940,7 +6940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6957,7 +6957,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6976,7 +6976,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6997,7 +6997,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7020,7 +7020,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7045,7 +7045,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64( undef, undef, half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7058,7 +7058,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7073,7 +7073,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7090,7 +7090,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7109,7 +7109,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7130,7 +7130,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7153,7 +7153,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7178,7 +7178,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7191,7 +7191,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7206,7 +7206,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7223,7 +7223,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7242,7 +7242,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 @@ -7263,7 +7263,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7286,7 +7286,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7311,7 +7311,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7324,7 +7324,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7339,7 +7339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7356,7 +7356,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64( undef, undef, undef, undef, undef, half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7375,7 +7375,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7396,7 +7396,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7419,7 +7419,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7444,7 +7444,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7457,7 +7457,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7472,7 +7472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7489,7 +7489,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7508,7 +7508,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7529,7 +7529,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7552,7 +7552,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7577,7 +7577,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7590,7 +7590,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7605,7 +7605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7622,7 +7622,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7641,7 +7641,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7662,7 +7662,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7685,7 +7685,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7710,7 +7710,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7723,7 +7723,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7738,7 +7738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7755,7 +7755,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7774,7 +7774,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7795,7 +7795,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7818,7 +7818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7843,7 +7843,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64( undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -7856,7 +7856,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64( undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7871,7 +7871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64( undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7888,7 +7888,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64( undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7907,7 +7907,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7928,7 +7928,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7951,7 +7951,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -37,7 +37,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -54,7 +54,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -73,7 +73,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -94,7 +94,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -117,7 +117,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -155,7 +155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -227,7 +227,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, 
undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -275,7 +275,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -303,7 +303,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -320,7 +320,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -360,7 +360,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -383,7 +383,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -436,7 +436,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -453,7 +453,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -472,7 +472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -541,7 +541,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -554,7 +554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -586,7 +586,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -605,7 +605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -626,7 +626,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -674,7 +674,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -687,7 +687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64( undef, undef, 
undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -702,7 +702,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -807,7 +807,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -820,7 +820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -852,7 +852,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -892,7 +892,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 @@ -915,7 +915,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -940,7 +940,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -985,7 +985,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1004,7 +1004,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1073,7 +1073,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1086,7 +1086,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1137,7 +1137,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1181,7 +1181,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1206,7 +1206,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1251,7 +1251,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1270,7 +1270,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1291,7 +1291,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1314,7 +1314,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1352,7 +1352,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1384,7 +1384,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1424,7 +1424,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1472,7 +1472,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1485,7 +1485,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1500,7 +1500,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1517,7 +1517,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1536,7 +1536,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1557,7 +1557,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1580,7 +1580,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1605,7 +1605,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1633,7 +1633,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1650,7 +1650,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, } [[TMP0]], 1 @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1713,7 +1713,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1738,7 +1738,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1751,7 +1751,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1766,7 +1766,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1783,7 +1783,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1802,7 +1802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1823,7 +1823,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1846,7 +1846,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -1884,7 +1884,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1935,7 +1935,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1956,7 +1956,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2004,7 +2004,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2017,7 +2017,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2049,7 +2049,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2089,7 +2089,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, 
undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2112,7 +2112,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2137,7 +2137,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2165,7 +2165,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2182,7 +2182,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2201,7 +2201,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2222,7 +2222,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2245,7 +2245,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2270,7 +2270,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2283,7 +2283,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2298,7 +2298,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2315,7 +2315,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2334,7 +2334,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2355,7 +2355,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2378,7 +2378,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2403,7 +2403,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2416,7 +2416,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2431,7 +2431,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2448,7 +2448,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2467,7 +2467,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2488,7 +2488,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2511,7 +2511,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2536,7 +2536,7 @@ // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -2549,7 +2549,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -2564,7 +2564,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -2581,7 +2581,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -2600,7 +2600,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -2621,7 +2621,7 @@
 // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2644,7 +2644,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2669,7 +2669,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2682,7 +2682,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2697,7 +2697,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2714,7 +2714,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2733,7 +2733,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2754,7 +2754,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2777,7 +2777,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2802,7 +2802,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2815,7 +2815,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2830,7 +2830,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2847,7 +2847,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2866,7 +2866,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2887,7 +2887,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2910,7 +2910,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2935,7 +2935,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -2948,7 +2948,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2963,7 +2963,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2980,7 +2980,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2999,7 +2999,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3020,7 +3020,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3043,7 +3043,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3068,7 +3068,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3081,7 +3081,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3096,7 +3096,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3113,7 +3113,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3132,7 +3132,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3153,7 +3153,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3176,7 +3176,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg8ei64_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3201,7 +3201,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3214,7 +3214,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3229,7 +3229,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3246,7 +3246,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3265,7 +3265,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3286,7 +3286,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3309,7 +3309,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3334,7 +3334,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3347,7 +3347,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3362,7 +3362,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3379,7 +3379,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3398,7 +3398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3419,7 +3419,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3442,7 +3442,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3467,7 +3467,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i8.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3480,7 +3480,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i8.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3495,7 +3495,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64(i8* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3512,7 +3512,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3531,7 +3531,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3552,7 +3552,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3575,7 +3575,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3600,7 +3600,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 
1 @@ -3613,7 +3613,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3628,7 +3628,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3645,7 +3645,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3664,7 +3664,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3685,7 +3685,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3708,7 +3708,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3733,7 +3733,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3746,7 +3746,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3761,7 +3761,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3778,7 +3778,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3797,7 +3797,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3818,7 +3818,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3841,7 +3841,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3866,7 +3866,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i16.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i16.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3894,7 +3894,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3911,7 +3911,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3930,7 +3930,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3951,7 +3951,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3974,7 +3974,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3999,7 +3999,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4012,7 +4012,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4027,7 +4027,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4044,7 +4044,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4063,7 +4063,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4084,7 +4084,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4107,7 +4107,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4132,7 +4132,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4145,7 +4145,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i32.i64( undef, undef, undef, 
i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4160,7 +4160,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4177,7 +4177,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4196,7 +4196,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4217,7 +4217,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4240,7 +4240,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4265,7 +4265,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i32.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4278,7 +4278,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i32.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4293,7 +4293,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4310,7 +4310,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4329,7 +4329,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4350,7 +4350,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 1 @@ -4373,7 +4373,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4398,7 +4398,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i8.nxv1i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4411,7 +4411,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i8.nxv1i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4426,7 +4426,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4443,7 +4443,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4462,7 +4462,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4483,7 +4483,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4506,7 +4506,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i8.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4531,7 +4531,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i8.nxv2i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4544,7 +4544,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i8.nxv2i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4559,7 +4559,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4576,7 +4576,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vluxseg5.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4595,7 +4595,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4616,7 +4616,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4639,7 +4639,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i8.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4664,7 +4664,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4i8.nxv4i64.i64( undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4677,7 +4677,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4i8.nxv4i64.i64( undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4692,7 +4692,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4709,7 +4709,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4728,7 +4728,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4749,7 +4749,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4772,7 +4772,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64(i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4i8.nxv4i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i8* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4797,7 +4797,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4810,7 +4810,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4825,7 +4825,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4842,7 +4842,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4861,7 +4861,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4882,7 +4882,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4905,7 +4905,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.nxv1i16.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4930,7 +4930,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i8.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -4943,7 +4943,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i8.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4958,7 +4958,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4975,7 +4975,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4994,7 +4994,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5015,7 +5015,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5038,7 +5038,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5063,7 +5063,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5076,7 +5076,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5091,7 +5091,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5108,7 +5108,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , } [[TMP0]], 1 @@ -5127,7 +5127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5148,7 +5148,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5171,7 +5171,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5196,7 +5196,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i16.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5209,7 +5209,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i16.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5224,7 +5224,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, 
i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5241,7 +5241,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5260,7 +5260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5281,7 +5281,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5304,7 +5304,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5329,7 +5329,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5342,7 +5342,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5357,7 +5357,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5374,7 +5374,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5393,7 +5393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5414,7 +5414,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5437,7 +5437,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5462,7 +5462,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i32.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5475,7 +5475,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i32.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5490,7 +5490,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5507,7 +5507,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5526,7 +5526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5547,7 +5547,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i32.i64( undef, undef, 
undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5570,7 +5570,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5595,7 +5595,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i16.nxv1i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5608,7 +5608,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i16.nxv1i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5623,7 +5623,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5640,7 +5640,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5659,7 +5659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5680,7 +5680,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5703,7 +5703,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i16.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5728,7 +5728,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2i16.nxv2i64.i64( undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5741,7 +5741,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2i16.nxv2i64.i64( undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5756,7 +5756,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5773,7 +5773,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5792,7 +5792,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5813,7 +5813,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5836,7 +5836,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64(i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2i16.nxv2i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i16* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5861,7 +5861,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i8.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -5874,7 +5874,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.nxv1i32.nxv1i8.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5889,7 +5889,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5906,7 +5906,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5925,7 +5925,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5946,7 +5946,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5969,7 +5969,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5994,7 +5994,7 @@ // CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i16.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6007,7 +6007,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i16.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6022,7 +6022,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6039,7 +6039,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6058,7 +6058,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6079,7 +6079,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6102,7 +6102,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6127,7 +6127,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6140,7 +6140,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i32.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6155,7 +6155,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6172,7 +6172,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6191,7 +6191,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6212,7 +6212,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6235,7 +6235,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6260,7 +6260,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1i32.nxv1i64.i64( undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6273,7 +6273,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1i32.nxv1i64.i64( undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6288,7 +6288,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6305,7 +6305,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6324,7 +6324,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6345,7 +6345,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6368,7 +6368,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64(i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1i32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, i32* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6393,7 +6393,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6406,7 +6406,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6421,7 +6421,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6438,7 +6438,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6457,7 +6457,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6478,7 +6478,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6501,7 +6501,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6526,7 +6526,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6539,7 +6539,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6554,7 +6554,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6571,7 +6571,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6590,7 +6590,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6611,7 +6611,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ 
-6634,7 +6634,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6659,7 +6659,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6672,7 +6672,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6687,7 +6687,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6704,7 +6704,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6723,7 +6723,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6744,7 +6744,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6767,7 +6767,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -6792,7 +6792,7 @@ // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64( undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 @@ -6805,7 +6805,7 @@ // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64( undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -6820,7 +6820,7 @@ // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -6837,7 +6837,7 @@ // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -6856,7 +6856,7 @@ // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -6877,7 +6877,7 @@ // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -6900,7 +6900,7 @@ // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64( undef, undef, undef, undef, undef, undef, undef, undef, float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -842,13 +842,14 @@ } // For unit stride segment load - // Input: (pointer, vl) + // Input: (passthru, pointer, vl) class RISCVUSSegLoad : Intrinsic, !add(nf, -1))), - [LLVMPointerToElt<0>, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { - let VLOperand = 1; + !listconcat(!listsplat(LLVMMatchType<0>, nf), + [LLVMPointerToElt<0>, llvm_anyint_ty]), + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 1); } // For unit stride segment load with mask // Input: (maskedoff, pointer, mask, vl, policy) @@ -865,16 +866,17 @@ } // For unit stride fault-only-first segment load - // Input: (pointer, vl) + // Input: (passthru, pointer, vl) // Output: (data, vl) // NOTE: We model this with default memory properties since we model writing // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. 
 class RISCVUSSegLoadFF
       : Intrinsic, !add(nf, -1)), [llvm_anyint_ty]),
-                  [LLVMPointerToElt<0>, LLVMMatchType<1>],
-                  [NoCapture>]>, RISCVVIntrinsic {
-    let VLOperand = 1;
+                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                              [LLVMPointerToElt<0>, LLVMMatchType<1>]),
+                  [NoCapture>]>, RISCVVIntrinsic {
+    let VLOperand = !add(nf, 1);
   }
   // For unit stride fault-only-first segment load with mask
   // Input: (maskedoff, pointer, mask, vl, policy)
@@ -894,13 +896,14 @@
   }
   // For stride segment load
-  // Input: (pointer, offset, vl)
+  // Input: (passthru, pointer, offset, vl)
   class RISCVSSegLoad
       : Intrinsic, !add(nf, -1))),
-                  [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
-                  [NoCapture>, IntrReadMem]>, RISCVVIntrinsic {
-    let VLOperand = 2;
+                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                              [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>]),
+                  [NoCapture>, IntrReadMem]>, RISCVVIntrinsic {
+    let VLOperand = !add(nf, 2);
   }
   // For stride segment load with mask
   // Input: (maskedoff, pointer, offset, mask, vl, policy)
@@ -918,13 +921,14 @@
   }
   // For indexed segment load
-  // Input: (pointer, index, vl)
+  // Input: (passthru, pointer, index, vl)
   class RISCVISegLoad
       : Intrinsic, !add(nf, -1))),
-                  [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
-                  [NoCapture>, IntrReadMem]>, RISCVVIntrinsic {
-    let VLOperand = 2;
+                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                              [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty]),
+                  [NoCapture>, IntrReadMem]>, RISCVVIntrinsic {
+    let VLOperand = !add(nf, 2);
   }
   // For indexed segment load with mask
   // Input: (maskedoff, pointer, index, mask, vl, policy)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -124,6 +124,7 @@
 struct VLSEGPseudo {
   uint16_t NF : 4;
   uint16_t Masked : 1;
+  uint16_t IsTU : 1;
   uint16_t Strided : 1;
   uint16_t FF : 1;
   uint16_t Log2SEW : 3;
@@ -134,6 +135,7 @@
 struct VLXSEGPseudo {
   uint16_t NF : 4;
   uint16_t Masked : 1;
+  uint16_t IsTU : 1;
   uint16_t Ordered : 1;
   uint16_t Log2SEW : 3;
   uint16_t LMUL : 3;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -311,6 +311,10 @@
   Operands.push_back(Glue);
 }
+static bool isAllUndef(ArrayRef Values) {
+  return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
+}
+
 void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                     bool IsStrided) {
   SDLoc DL(Node);
@@ -321,19 +325,21 @@
   unsigned CurOp = 2;
   SmallVector Operands;
-  if (IsMasked) {
-    SmallVector Regs(Node->op_begin() + CurOp,
-                     Node->op_begin() + CurOp + NF);
-    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
-    Operands.push_back(MaskedOff);
-    CurOp += NF;
+
+  SmallVector Regs(Node->op_begin() + CurOp,
+                   Node->op_begin() + CurOp + NF);
+  bool IsTU = IsMasked || !isAllUndef(Regs);
+  if (IsTU) {
+    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
+    Operands.push_back(Merge);
   }
+  CurOp += NF;
   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                              Operands, /*IsLoad=*/true);
   const RISCV::VLSEGPseudo *P =
-      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
+      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                             static_cast(LMUL));
   MachineSDNode *Load =
       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
@@ -362,20 +368,22 @@
   unsigned CurOp =
2; SmallVector Operands; - if (IsMasked) { - SmallVector Regs(Node->op_begin() + CurOp, - Node->op_begin() + CurOp + NF); + + SmallVector Regs(Node->op_begin() + CurOp, + Node->op_begin() + CurOp + NF); + bool IsTU = IsMasked || !isAllUndef(Regs); + if (IsTU) { SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL); Operands.push_back(MaskedOff); - CurOp += NF; } + CurOp += NF; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ false, Operands, /*IsLoad=*/true); const RISCV::VLSEGPseudo *P = - RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true, + RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true, Log2SEW, static_cast(LMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, MVT::Glue, Operands); @@ -407,13 +415,15 @@ unsigned CurOp = 2; SmallVector Operands; - if (IsMasked) { - SmallVector Regs(Node->op_begin() + CurOp, - Node->op_begin() + CurOp + NF); + + SmallVector Regs(Node->op_begin() + CurOp, + Node->op_begin() + CurOp + NF); + bool IsTU = IsMasked || !isAllUndef(Regs); + if (IsTU) { SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL); Operands.push_back(MaskedOff); - CurOp += NF; } + CurOp += NF; MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, @@ -430,7 +440,7 @@ "values when XLEN=32"); } const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo( - NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), + NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4917,9 +4917,12 @@ SmallVector ContainerVTs(NF, ContainerVT); ContainerVTs.push_back(MVT::Other); SDVTList VTs = DAG.getVTList(ContainerVTs); + SmallVector Ops = {Load->getChain(), IntID}; + Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT)); + Ops.push_back(Op.getOperand(2)); + Ops.push_back(VL); SDValue Result = - DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, - {Load->getChain(), IntID, Op.getOperand(2), VL}, + DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, Load->getMemoryVT(), Load->getMemOperand()); SmallVector Results; for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -503,9 +503,10 @@ let PrimaryKeyName = "getVSXPseudo"; } -class RISCVVLSEG N, bit M, bit Str, bit F, bits<3> S, bits<3> L> { +class RISCVVLSEG N, bit M, bit TU, bit Str, bit F, bits<3> S, bits<3> L> { bits<4> NF = N; bits<1> Masked = M; + bits<1> IsTU = TU; bits<1> Strided = Str; bits<1> FF = F; bits<3> Log2SEW = S; @@ -516,14 +517,15 @@ def RISCVVLSEGTable : GenericTable { let FilterClass = "RISCVVLSEG"; let CppTypeName = "VLSEGPseudo"; - let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"]; - let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"]; + let Fields = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"]; + let PrimaryKey = ["NF", "Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"]; let PrimaryKeyName = "getVLSEGPseudo"; } -class RISCVVLXSEG N, bit M, bit O, bits<3> S, 
bits<3> L, bits<3> IL> { +class RISCVVLXSEG N, bit M, bit TU, bit O, bits<3> S, bits<3> L, bits<3> IL> { bits<4> NF = N; bits<1> Masked = M; + bits<1> IsTU = TU; bits<1> Ordered = O; bits<3> Log2SEW = S; bits<3> LMUL = L; @@ -534,8 +536,8 @@ def RISCVVLXSEGTable : GenericTable { let FilterClass = "RISCVVLXSEG"; let CppTypeName = "VLXSEGPseudo"; - let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"]; - let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"]; + let Fields = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"]; + let PrimaryKey = ["NF", "Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"]; let PrimaryKeyName = "getVLXSEGPseudo"; } @@ -1313,13 +1315,29 @@ Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLSEG.val, VLMul> { + RISCVVLSEG.val, VLMul> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoUSSegLoadNoMaskTU NF, bit isFF>: + Pseudo<(outs RetClass:$rd), + (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLSEG.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; let HasSEWOp = 1; let HasDummyMask = 1; + let HasMergeOp = 1; + let Constraints = "$rd = $dest"; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -1328,7 +1346,7 @@ (ins GetVRegNoV0.R:$merge, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLSEG.val, VLMul> { + RISCVVLSEG.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1345,7 +1363,7 @@ Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLSEG.val, VLMul> { + RISCVVLSEG.val, VLMul> { let mayLoad = 1; let mayLoad = 1; let mayStore = 0; @@ -1356,13 +1374,30 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoSSegLoadNoMaskTU NF>: + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLSEG.val, VLMul> { + let mayLoad = 1; + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let Constraints = "$rd = $merge"; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoSSegLoadMask NF>: Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLSEG.val, VLMul> { + RISCVVLSEG.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1380,7 +1415,7 @@ Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLXSEG.val, VLMul, LMUL> { + RISCVVLXSEG.val, VLMul, LMUL> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1393,6 +1428,25 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoISegLoadNoMaskTU LMUL, + bits<4> NF, bit Ordered>: + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLXSEG.val, VLMul, LMUL> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + // For vector indexed segment loads, the destination vector register groups + // cannot overlap the source vector register group + let Constraints = "@earlyclobber $rd, $rd 
= $merge"; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoISegLoadMask LMUL, bits<4> NF, bit Ordered>: Pseudo<(outs GetVRegNoV0.R:$rd), @@ -1400,7 +1454,7 @@ IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLXSEG.val, VLMul, LMUL> { + RISCVVLXSEG.val, VLMul, LMUL> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -2751,6 +2805,8 @@ defvar FFStr = !if(isFF, "FF", ""); def nf # "E" # eew # FFStr # "_V_" # LInfo : VPseudoUSSegLoadNoMask; + def nf # "E" # eew # FFStr # "_V_" # LInfo # "_TU" : + VPseudoUSSegLoadNoMaskTU; def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" : VPseudoUSSegLoadMask; } @@ -2767,6 +2823,7 @@ foreach nf = NFSet.L in { defvar vreg = SegRegClass.RC; def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask; + def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU; def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask; } } @@ -2793,6 +2850,9 @@ def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo : VPseudoISegLoadNoMask; + def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" : + VPseudoISegLoadNoMaskTU; def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" : VPseudoISegLoadMask; diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -85,7 +85,7 @@ ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret entry: - %i = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* nonnull poison, poison, i64 55) + %i = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( undef, undef, half* nonnull poison, poison, i64 55) %i1 = extractvalue { , } %i, 0 %i2 = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, poison, poison, zeroinitializer, i64 36, i64 0) call void @func() @@ -98,7 +98,7 @@ } declare void @func() -declare { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* nocapture, , i64) +declare { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64( , , half* nocapture, , i64) declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(, , , , i64, i64 immarg) declare @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(, , , , i64, i64 immarg) declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(, , , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -51,7 +51,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i32 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -105,7 +105,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i32 %vl) call void asm sideeffect "", 
"~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -163,7 +163,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i32 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -221,7 +221,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i32 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -285,15 +285,15 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i32 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,,} %0, 1 ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i32) -declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i32) -declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1i32(,, i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2i32(,, i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4i32(,, i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8i32(,, i32* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, i32* , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -51,7 +51,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i64 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -105,7 +105,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i64 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = 
extractvalue {,} %0, 1 @@ -163,7 +163,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i64 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -221,7 +221,7 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i64 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,} %0, 1 @@ -285,15 +285,15 @@ ; SPILL-O2-NEXT: addi sp, sp, 16 ; SPILL-O2-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i64 %vl) call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() %1 = extractvalue {,,} %0, 1 ret %1 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i64) -declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i64) -declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1i32(,, i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2i32(,, i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4i32(,, i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8i32(,, i32* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, i32* , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -32,7 +32,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { @@ -43,7 +43,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv16i16.nxv16i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -62,7 +62,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { @@ -73,7 +73,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -92,7 +92,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -103,7 +103,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -122,7 +122,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -133,7 +133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -152,7 +152,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -163,7 +163,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -182,7 +182,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -193,7 +193,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -214,7 +214,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -225,7 +225,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, 
%index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -246,7 +246,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -257,7 +257,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -278,7 +278,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -289,7 +289,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -311,7 +311,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -322,7 +322,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -344,7 +344,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -355,7 +355,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -377,7 +377,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -388,7 +388,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -411,7 +411,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i32(i8* 
%base, %index, i32 %vl) { @@ -422,7 +422,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -445,7 +445,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -456,7 +456,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -479,7 +479,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -490,7 +490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -514,7 +514,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -525,7 +525,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -549,7 +549,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -560,7 +560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -584,7 +584,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -595,7 +595,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, 
i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -620,7 +620,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -631,7 +631,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -656,7 +656,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -667,7 +667,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -692,7 +692,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -703,7 +703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -729,7 +729,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -740,7 +740,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -766,7 +766,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -777,7 +777,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -803,7 +803,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i32) +declare {,} 
@llvm.riscv.vloxseg2.nxv16i8.nxv16i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -814,7 +814,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -833,7 +833,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -844,7 +844,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -863,7 +863,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -874,7 +874,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -893,7 +893,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -904,7 +904,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -924,7 +924,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -935,7 +935,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -956,7 +956,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -967,7 +967,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret 
%1 } @@ -987,7 +987,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -998,7 +998,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1020,7 +1020,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1053,7 +1053,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -1064,7 +1064,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1085,7 +1085,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1115,7 +1115,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1126,7 +1126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1145,7 +1145,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1175,7 +1175,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1186,7 +1186,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1207,7 +1207,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1218,7 +1218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1239,7 +1239,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1250,7 +1250,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1271,7 +1271,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1304,7 +1304,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1315,7 +1315,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1337,7 +1337,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(,,,, i32*, , i32) declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1348,7 +1348,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1370,7 +1370,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1381,7 +1381,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1404,7 +1404,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1415,7 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1438,7 +1438,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1449,7 +1449,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1472,7 +1472,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1483,7 +1483,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1507,7 +1507,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1518,7 +1518,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* 
%base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1542,7 +1542,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1553,7 +1553,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1577,7 +1577,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1588,7 +1588,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1613,7 +1613,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1624,7 +1624,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1649,7 +1649,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1660,7 +1660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1685,7 +1685,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1696,7 +1696,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, 
%index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1722,7 +1722,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1733,7 +1733,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1759,7 +1759,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1770,7 +1770,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1796,7 +1796,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1807,7 +1807,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1826,7 +1826,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -1837,7 +1837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1856,7 +1856,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -1867,7 +1867,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1886,7 +1886,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32, i32) define 
@test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1918,7 +1918,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -1929,7 +1929,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1950,7 +1950,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -1961,7 +1961,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1981,7 +1981,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1992,7 +1992,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2014,7 +2014,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2025,7 +2025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2047,7 +2047,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2058,7 +2058,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2080,7 +2080,7 @@ ret 
%1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2091,7 +2091,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2114,7 +2114,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2125,7 +2125,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2148,7 +2148,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2159,7 +2159,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2182,7 +2182,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2193,7 +2193,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2217,7 +2217,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2228,7 +2228,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2252,7 +2252,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32, i32) define 
@test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2263,7 +2263,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2287,7 +2287,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2298,7 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2323,7 +2323,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2334,7 +2334,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2359,7 +2359,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2370,7 +2370,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2395,7 +2395,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2406,7 +2406,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2432,7 +2432,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2443,7 +2443,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2469,7 +2469,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2480,7 +2480,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2506,7 +2506,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2536,7 +2536,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2547,7 +2547,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2566,7 +2566,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2577,7 +2577,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2596,7 +2596,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2607,7 +2607,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2628,7 +2628,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i32) +declare {,,} 
@llvm.riscv.vloxseg3.nxv1i32.nxv1i32(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2639,7 +2639,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2660,7 +2660,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2671,7 +2671,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2692,7 +2692,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2703,7 +2703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2725,7 +2725,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2736,7 +2736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2758,7 +2758,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2769,7 +2769,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2791,7 +2791,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2802,7 +2802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call 
{,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2825,7 +2825,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2836,7 +2836,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2859,7 +2859,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32, i32) define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2870,7 +2870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2893,7 +2893,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2904,7 +2904,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2928,7 +2928,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2939,7 +2939,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2963,7 +2963,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32, i32) define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2974,7 +2974,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2998,7 +2998,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i32) 
+declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -3009,7 +3009,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3034,7 +3034,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -3045,7 +3045,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3070,7 +3070,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32, i32) define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -3081,7 +3081,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3106,7 +3106,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -3117,7 +3117,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3143,7 +3143,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -3154,7 +3154,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3180,7 +3180,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(,,,,,,,, i32*, , i32) declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32, i32) define @test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -3191,7 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3217,7 +3217,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3228,7 +3228,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3247,7 +3247,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3258,7 +3258,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3277,7 +3277,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3288,7 +3288,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3307,7 +3307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3318,7 +3318,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3339,7 +3339,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3350,7 +3350,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} 
%0, 1 ret %1 } @@ -3371,7 +3371,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3382,7 +3382,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3402,7 +3402,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3413,7 +3413,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3435,7 +3435,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3446,7 +3446,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3468,7 +3468,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3479,7 +3479,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3501,7 +3501,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3512,7 +3512,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3531,7 +3531,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3542,7 +3542,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3561,7 +3561,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3572,7 +3572,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3591,7 +3591,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3602,7 +3602,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3622,7 +3622,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3633,7 +3633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3654,7 +3654,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3665,7 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3685,7 +3685,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3696,7 +3696,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3718,7 +3718,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) 
{ @@ -3729,7 +3729,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3751,7 +3751,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3762,7 +3762,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3783,7 +3783,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3794,7 +3794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3817,7 +3817,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3828,7 +3828,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3851,7 +3851,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3862,7 +3862,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3884,7 +3884,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3919,7 +3919,7 @@ ret %1 } 
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3930,7 +3930,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3954,7 +3954,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3965,7 +3965,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3989,7 +3989,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -4000,7 +4000,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4025,7 +4025,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -4036,7 +4036,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4061,7 +4061,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -4072,7 +4072,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4097,7 +4097,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32, i32) define 
@test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4134,7 +4134,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -4145,7 +4145,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4171,7 +4171,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -4182,7 +4182,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4208,7 +4208,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { @@ -4219,7 +4219,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4238,7 +4238,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4268,7 +4268,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { @@ -4279,7 +4279,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32( undef, undef, 
i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4298,7 +4298,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4309,7 +4309,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4328,7 +4328,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4339,7 +4339,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4358,7 +4358,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4369,7 +4369,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4388,7 +4388,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4399,7 +4399,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4420,7 +4420,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4431,7 +4431,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4452,7 +4452,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4463,7 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call 
{,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4483,7 +4483,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4494,7 +4494,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4516,7 +4516,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4527,7 +4527,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4549,7 +4549,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4560,7 +4560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4582,7 +4582,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4593,7 +4593,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4616,7 +4616,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4627,7 +4627,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4650,7 +4650,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32, i32) define 
@test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4661,7 +4661,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4684,7 +4684,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4695,7 +4695,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4719,7 +4719,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4730,7 +4730,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4754,7 +4754,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4765,7 +4765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4789,7 +4789,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4800,7 +4800,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4825,7 +4825,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4836,7 +4836,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4861,7 +4861,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4872,7 +4872,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4897,7 +4897,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4908,7 +4908,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4934,7 +4934,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4945,7 +4945,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4971,7 +4971,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4982,7 +4982,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5008,7 +5008,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5019,7 +5019,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5038,7 +5038,7 @@ ret %1 } -declare {,} 
@llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5068,7 +5068,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5079,7 +5079,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5098,7 +5098,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5109,7 +5109,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5130,7 +5130,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5141,7 +5141,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5162,7 +5162,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5173,7 +5173,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5194,7 +5194,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5205,7 +5205,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5227,7 +5227,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5238,7 +5238,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5260,7 +5260,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5271,7 +5271,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5293,7 +5293,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5304,7 +5304,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5327,7 +5327,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5338,7 +5338,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5361,7 +5361,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5372,7 +5372,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5395,7 +5395,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i32) declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5406,7 +5406,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5430,7 +5430,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5441,7 +5441,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5465,7 +5465,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5476,7 +5476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5500,7 +5500,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5511,7 +5511,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5536,7 +5536,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5547,7 +5547,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5572,7 +5572,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5583,7 +5583,7 @@ ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5608,7 +5608,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5619,7 +5619,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5645,7 +5645,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5656,7 +5656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5682,7 +5682,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5693,7 +5693,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5719,7 +5719,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { @@ -5730,7 +5730,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5749,7 +5749,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { @@ -5760,7 +5760,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8( undef, 
undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5779,7 +5779,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -5790,7 +5790,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5809,7 +5809,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -5820,7 +5820,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5839,7 +5839,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(,, i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i32, i32) define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -5850,7 +5850,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5869,7 +5869,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -5880,7 +5880,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5901,7 +5901,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -5912,7 +5912,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5933,7 +5933,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32, i32) define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -5944,7 +5944,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5965,7 +5965,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -5976,7 +5976,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5998,7 +5998,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -6009,7 +6009,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6031,7 +6031,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32, i32) define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -6042,7 +6042,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6064,7 +6064,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -6075,7 +6075,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6098,7 +6098,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32, i32) define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -6109,7 +6109,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6132,7 +6132,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32, i32) define 
@test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -6143,7 +6143,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6166,7 +6166,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -6177,7 +6177,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6201,7 +6201,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -6212,7 +6212,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6236,7 +6236,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32, i32) define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -6247,7 +6247,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6271,7 +6271,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -6282,7 +6282,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6307,7 +6307,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -6318,7 +6318,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6343,7 +6343,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32, i32) define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -6354,7 +6354,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6379,7 +6379,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -6390,7 +6390,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6416,7 +6416,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { @@ -6427,7 +6427,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6453,7 +6453,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32, i32) define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { @@ -6464,7 +6464,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6490,7 +6490,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6501,7 +6501,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6520,7 +6520,7 @@ ret %1 } -declare {,} 
@llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -6531,7 +6531,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6550,7 +6550,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(,, i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32, i32) define @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -6561,7 +6561,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6580,7 +6580,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6591,7 +6591,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6612,7 +6612,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -6623,7 +6623,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6644,7 +6644,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32, i32) define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -6655,7 +6655,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6676,7 +6676,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6687,7 +6687,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6709,7 +6709,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -6720,7 +6720,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6742,7 +6742,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32, i32) define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -6753,7 +6753,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6775,7 +6775,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6786,7 +6786,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6809,7 +6809,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -6820,7 +6820,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6843,7 +6843,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32, i32) define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -6854,7 +6854,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6877,7 +6877,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(,,,,,, i16*, , i32) declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6888,7 +6888,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6912,7 +6912,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -6923,7 +6923,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6947,7 +6947,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32, i32) define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -6958,7 +6958,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6982,7 +6982,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -6993,7 +6993,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7018,7 +7018,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -7029,7 +7029,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7054,7 +7054,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32, i32) define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -7065,7 +7065,7 @@ ; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7090,7 +7090,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { @@ -7101,7 +7101,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7127,7 +7127,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { @@ -7138,7 +7138,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7164,7 +7164,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32, i32) define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { @@ -7175,7 +7175,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7201,7 +7201,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { @@ -7212,7 +7212,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7231,7 +7231,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { @@ -7242,7 +7242,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8( undef, undef, 
i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7261,7 +7261,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(,, i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32, i32) define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { @@ -7272,7 +7272,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7291,7 +7291,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { @@ -7302,7 +7302,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7323,7 +7323,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { @@ -7334,7 +7334,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7355,7 +7355,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(,,, i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32, i32) define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { @@ -7366,7 +7366,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7387,7 +7387,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { @@ -7398,7 +7398,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7420,7 +7420,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { @@ -7431,7 +7431,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7453,7 +7453,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32, i32) define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { @@ -7464,7 +7464,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7486,7 +7486,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i32 %vl) { @@ -7497,7 +7497,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7516,7 +7516,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i32 %vl) { @@ -7527,7 +7527,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7546,7 +7546,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i32 %vl) { @@ -7557,7 +7557,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7576,7 +7576,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i32 %vl) { @@ -7587,7 +7587,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7606,7 +7606,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i32) +declare {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv4i8(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i32 %vl) { @@ -7617,7 +7617,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7636,7 +7636,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i32 %vl) { @@ -7647,7 +7647,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7666,7 +7666,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -7677,7 +7677,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7696,7 +7696,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -7707,7 +7707,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7726,7 +7726,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -7737,7 +7737,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7756,7 +7756,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -7767,7 +7767,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1f64.nxv1i8( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7788,7 +7788,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -7799,7 +7799,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7820,7 +7820,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32, i32) define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -7831,7 +7831,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7852,7 +7852,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -7863,7 +7863,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7885,7 +7885,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -7896,7 +7896,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7918,7 +7918,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -7929,7 +7929,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7951,7 +7951,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(,,,,, double*, , i32) 
declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -7962,7 +7962,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7985,7 +7985,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(,,,,, double*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -7996,7 +7996,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8019,7 +8019,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(,,,,, double*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32, i32) define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -8030,7 +8030,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8053,7 +8053,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(,,,,,, double*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -8064,7 +8064,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8088,7 +8088,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(,,,,,, double*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -8099,7 +8099,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8123,7 +8123,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(,,,,,, double*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32, i32) define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -8134,7 
+8134,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8158,7 +8158,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(,,,,,,, double*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -8169,7 +8169,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8194,7 +8194,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(,,,,,,, double*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -8205,7 +8205,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8230,7 +8230,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(,,,,,,, double*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32, i32) define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -8241,7 +8241,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8266,7 +8266,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(,,,,,,,, double*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { @@ -8277,7 +8277,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8303,7 +8303,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(,,,,,,,, double*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { @@ -8314,7 +8314,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8340,7 +8340,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(,,,,,,,, double*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32, i32) define @test_vloxseg8_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { @@ -8351,7 +8351,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8377,7 +8377,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8388,7 +8388,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8407,7 +8407,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8418,7 +8418,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8437,7 +8437,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8448,7 +8448,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8467,7 +8467,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8478,7 +8478,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8499,7 +8499,7 @@ 
ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8510,7 +8510,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8531,7 +8531,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8542,7 +8542,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8563,7 +8563,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8574,7 +8574,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8596,7 +8596,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8607,7 +8607,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8629,7 +8629,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8640,7 +8640,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8662,7 +8662,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8673,7 +8673,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8696,7 +8696,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8707,7 +8707,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8730,7 +8730,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8741,7 +8741,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8764,7 +8764,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8775,7 +8775,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8799,7 +8799,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8810,7 +8810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8834,7 +8834,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8845,7 +8845,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8869,7 +8869,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8880,7 +8880,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8905,7 +8905,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -8916,7 +8916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8941,7 +8941,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -8952,7 +8952,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8977,7 +8977,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { @@ -8988,7 +8988,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9014,7 +9014,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { @@ -9025,7 +9025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, 
undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9051,7 +9051,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { @@ -9062,7 +9062,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9088,7 +9088,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9099,7 +9099,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9118,7 +9118,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9129,7 +9129,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9148,7 +9148,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9159,7 +9159,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9178,7 +9178,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9189,7 +9189,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9210,7 +9210,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32, i32) define 
@test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9221,7 +9221,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9242,7 +9242,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9253,7 +9253,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9274,7 +9274,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9285,7 +9285,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9307,7 +9307,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9318,7 +9318,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9340,7 +9340,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9351,7 +9351,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9373,7 +9373,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9384,7 +9384,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 
= extractvalue {,,,,} %0, 1 ret %1 } @@ -9407,7 +9407,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9418,7 +9418,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9441,7 +9441,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9452,7 +9452,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9475,7 +9475,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9486,7 +9486,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9510,7 +9510,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9521,7 +9521,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9545,7 +9545,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9556,7 +9556,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9580,7 +9580,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(,,,,,,, half*, , i32) 
declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9591,7 +9591,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9616,7 +9616,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9627,7 +9627,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9652,7 +9652,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9663,7 +9663,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9688,7 +9688,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9725,7 +9725,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9736,7 +9736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9762,7 +9762,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, 
, , i32, i32) define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9773,7 +9773,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9799,7 +9799,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9810,7 +9810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9829,7 +9829,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -9840,7 +9840,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9859,7 +9859,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -9870,7 +9870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9889,7 +9889,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9900,7 +9900,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9921,7 +9921,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -9932,7 +9932,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue 
{,,} %0, 1 ret %1 } @@ -9953,7 +9953,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -9964,7 +9964,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9985,7 +9985,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9996,7 +9996,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10018,7 +10018,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10029,7 +10029,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10051,7 +10051,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10062,7 +10062,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10084,7 +10084,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10095,7 +10095,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10118,7 +10118,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32, i32) define 
@test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10129,7 +10129,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10152,7 +10152,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32, i32) define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10163,7 +10163,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10186,7 +10186,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10197,7 +10197,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10221,7 +10221,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10232,7 +10232,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10256,7 +10256,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32, i32) define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10267,7 +10267,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10291,7 +10291,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10302,7 +10302,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10327,7 +10327,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10338,7 +10338,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10363,7 +10363,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32, i32) define @test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10374,7 +10374,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10399,7 +10399,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10410,7 +10410,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10436,7 +10436,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10447,7 +10447,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10473,7 +10473,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32, i32) define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10484,7 +10484,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10510,7 +10510,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { @@ -10521,7 +10521,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10540,7 +10540,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10551,7 +10551,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10570,7 +10570,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10581,7 +10581,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10600,7 +10600,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { @@ -10611,7 +10611,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10632,7 +10632,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10643,7 +10643,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10664,7 +10664,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(,,, half*, , i32) declare 
{,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10675,7 +10675,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10695,7 +10695,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { @@ -10706,7 +10706,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10728,7 +10728,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10739,7 +10739,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10761,7 +10761,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10772,7 +10772,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10794,7 +10794,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { @@ -10805,7 +10805,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10824,7 +10824,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { @@ -10835,7 +10835,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8f32.nxv8i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10854,7 +10854,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { @@ -10865,7 +10865,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10884,7 +10884,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -10895,7 +10895,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10914,7 +10914,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -10925,7 +10925,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10944,7 +10944,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(,, double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32, i32) define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -10955,7 +10955,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10974,7 +10974,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32, i32) define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -10985,7 +10985,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11006,7 +11006,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32, i32) define 
@test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -11017,7 +11017,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11038,7 +11038,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(,,, double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32, i32) define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -11049,7 +11049,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11070,7 +11070,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -11081,7 +11081,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11103,7 +11103,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -11114,7 +11114,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11136,7 +11136,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(,,,, double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32, i32) define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -11147,7 +11147,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11169,7 +11169,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11180,7 +11180,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16( undef, 
undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11199,7 +11199,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11210,7 +11210,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11229,7 +11229,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11240,7 +11240,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11259,7 +11259,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11270,7 +11270,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11291,7 +11291,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11302,7 +11302,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11323,7 +11323,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11354,7 +11354,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11365,7 +11365,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11387,7 +11387,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11398,7 +11398,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11420,7 +11420,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11431,7 +11431,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11453,7 +11453,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11464,7 +11464,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11487,7 +11487,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11498,7 +11498,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11521,7 +11521,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11532,7 +11532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = 
extractvalue {,,,,} %0, 1 ret %1 } @@ -11555,7 +11555,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11566,7 +11566,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11590,7 +11590,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11601,7 +11601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11625,7 +11625,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11636,7 +11636,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11660,7 +11660,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11671,7 +11671,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11696,7 +11696,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11707,7 +11707,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11732,7 +11732,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i32) 
+declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11743,7 +11743,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11768,7 +11768,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11779,7 +11779,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11805,7 +11805,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11816,7 +11816,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11842,7 +11842,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11853,7 +11853,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11879,7 +11879,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -11890,7 +11890,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11909,7 +11909,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(,, half*, , i32) declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -11920,7 +11920,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11939,7 +11939,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(,, half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32, i32) define @test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -11950,7 +11950,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11969,7 +11969,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -11980,7 +11980,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12001,7 +12001,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12012,7 +12012,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12033,7 +12033,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(,,, half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32, i32) define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12044,7 +12044,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12065,7 +12065,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12076,7 +12076,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, half* %base, %index, 
i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12098,7 +12098,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12109,7 +12109,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12131,7 +12131,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32, i32) define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12142,7 +12142,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12164,7 +12164,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12175,7 +12175,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12198,7 +12198,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12209,7 +12209,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12232,7 +12232,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32, i32) define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12243,7 +12243,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12266,7 +12266,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i32) declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12277,7 +12277,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12301,7 +12301,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12312,7 +12312,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12336,7 +12336,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32, i32) define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12347,7 +12347,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12371,7 +12371,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12382,7 +12382,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12407,7 +12407,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12418,7 +12418,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12443,7 +12443,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32, i32) define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 
%vl) { @@ -12454,7 +12454,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12479,7 +12479,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12490,7 +12490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12516,7 +12516,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12527,7 +12527,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12553,7 +12553,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32, i32) define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12564,7 +12564,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12590,7 +12590,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12601,7 +12601,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12620,7 +12620,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12631,7 +12631,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12650,7 +12650,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i32) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(,, float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32, i32) define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12661,7 +12661,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12680,7 +12680,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12691,7 +12691,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12712,7 +12712,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12723,7 +12723,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12744,7 +12744,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i32) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(,,, float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32, i32) define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12755,7 +12755,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12776,7 +12776,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12787,7 +12787,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12809,7 +12809,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i32) +declare {,,,} 
@llvm.riscv.vloxseg4.nxv4f32.nxv4i8(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12820,7 +12820,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12842,7 +12842,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i32) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(,,,, float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32, i32) define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12853,7 +12853,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -32,7 +32,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) { @@ -43,7 +43,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -62,7 +62,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) { @@ -73,7 +73,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -92,7 +92,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, 
i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { @@ -103,7 +103,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -122,7 +122,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { @@ -133,7 +133,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -152,7 +152,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -163,7 +163,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -182,7 +182,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -193,7 +193,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -212,7 +212,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { @@ -223,7 +223,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -244,7 +244,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { @@ -255,7 +255,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -276,7 +276,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32*, , i64) +declare {,,} 
@llvm.riscv.vloxseg3.nxv4i32.nxv4i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -287,7 +287,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -307,7 +307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -318,7 +318,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -339,7 +339,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { @@ -350,7 +350,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -372,7 +372,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { @@ -383,7 +383,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -405,7 +405,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -416,7 +416,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -438,7 +438,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -449,7 +449,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -471,7 +471,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -482,7 +482,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -501,7 +501,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -512,7 +512,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -531,7 +531,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -542,7 +542,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -561,7 +561,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -572,7 +572,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -592,7 +592,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -603,7 +603,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -624,7 +624,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -635,7 +635,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -655,7 +655,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -666,7 +666,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -688,7 +688,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -721,7 +721,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -732,7 +732,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -753,7 +753,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -764,7 +764,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -783,7 +783,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -794,7 +794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -813,7 +813,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(,, i64*, , i64) declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -824,7 +824,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -843,7 +843,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -854,7 +854,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -873,7 +873,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -884,7 +884,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -905,7 +905,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -916,7 +916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -937,7 +937,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -948,7 +948,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -969,7 +969,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -980,7 +980,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1001,7 +1001,7 @@ ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1012,7 +1012,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1034,7 +1034,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1045,7 +1045,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1067,7 +1067,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1078,7 +1078,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1100,7 +1100,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1111,7 +1111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1133,7 +1133,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1144,7 +1144,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1167,7 +1167,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1178,7 +1178,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1201,7 +1201,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1212,7 +1212,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1235,7 +1235,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64, i64) define @test_vloxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1246,7 +1246,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1269,7 +1269,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1280,7 +1280,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1304,7 +1304,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1315,7 +1315,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1339,7 +1339,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1350,7 +1350,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue 
{,,,,,} %0, 1 ret %1 } @@ -1374,7 +1374,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64, i64) define @test_vloxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1385,7 +1385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1409,7 +1409,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1420,7 +1420,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1445,7 +1445,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1456,7 +1456,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1481,7 +1481,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1492,7 +1492,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1517,7 +1517,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64, i64) define @test_vloxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1528,7 +1528,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1553,7 +1553,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1i64.nxv1i64(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1564,7 +1564,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1590,7 +1590,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1601,7 +1601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1627,7 +1627,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1638,7 +1638,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1664,7 +1664,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64, i64) define @test_vloxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1675,7 +1675,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1701,7 +1701,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1712,7 +1712,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1731,7 +1731,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64, i64) define 
@test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1742,7 +1742,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1761,7 +1761,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1791,7 +1791,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -1802,7 +1802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1821,7 +1821,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1832,7 +1832,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1853,7 +1853,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1864,7 +1864,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1885,7 +1885,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -1896,7 +1896,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1917,7 +1917,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i64) +declare 
{,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -1928,7 +1928,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1949,7 +1949,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1960,7 +1960,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1982,7 +1982,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1993,7 +1993,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2015,7 +2015,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2026,7 +2026,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2048,7 +2048,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2059,7 +2059,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2081,7 +2081,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2092,7 +2092,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + 
%0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2115,7 +2115,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2126,7 +2126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2149,7 +2149,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2160,7 +2160,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2183,7 +2183,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2194,7 +2194,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2217,7 +2217,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2228,7 +2228,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2252,7 +2252,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2263,7 +2263,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2287,7 +2287,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , 
i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2298,7 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2322,7 +2322,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2333,7 +2333,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2357,7 +2357,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2368,7 +2368,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2393,7 +2393,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2404,7 +2404,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2429,7 +2429,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2440,7 +2440,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2465,7 +2465,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64, i64) 
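; A reconstructed before/after of one unmasked declaration and its call site, with the
; scalable vector types written out as an illustration (the element types are inferred
; from the intrinsic name, e.g. nxv1i32 is <vscale x 1 x i32>). The unmasked vloxsegN
; intrinsics gain one passthru operand per segment field ahead of the pointer, as the
; .mask variants already have, and these tests pass undef for every passthru, which is
; why the CHECK lines in the surrounding hunks are unchanged.
;
; before: declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i64)
; after:  declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i32>, i64)
;
; before: %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
; after:  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* %base, <vscale x 1 x i32> %index, i64 %vl)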
define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2476,7 +2476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2501,7 +2501,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2512,7 +2512,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2538,7 +2538,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2549,7 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2575,7 +2575,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2586,7 +2586,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2612,7 +2612,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2623,7 +2623,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2649,7 +2649,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2660,7 +2660,7 @@ ; 
CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2679,7 +2679,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2690,7 +2690,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2709,7 +2709,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2720,7 +2720,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2739,7 +2739,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -2750,7 +2750,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2769,7 +2769,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2780,7 +2780,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2801,7 +2801,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2812,7 +2812,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2833,7 +2833,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(,,, i16*, , i64) declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2844,7 +2844,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2864,7 +2864,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -2875,7 +2875,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2895,7 +2895,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2906,7 +2906,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2928,7 +2928,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2939,7 +2939,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2961,7 +2961,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2972,7 +2972,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2993,7 +2993,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, 
i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3026,7 +3026,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3037,7 +3037,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3056,7 +3056,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3067,7 +3067,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3086,7 +3086,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3097,7 +3097,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3116,7 +3116,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3127,7 +3127,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3146,7 +3146,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3157,7 +3157,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3177,7 +3177,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3188,7 +3188,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv4i8.nxv4i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3209,7 +3209,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3220,7 +3220,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3240,7 +3240,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3251,7 +3251,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3272,7 +3272,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3283,7 +3283,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3305,7 +3305,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3316,7 +3316,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3338,7 +3338,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3349,7 +3349,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3370,7 +3370,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3381,7 +3381,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3403,7 +3403,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3414,7 +3414,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3437,7 +3437,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3448,7 +3448,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3471,7 +3471,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3482,7 +3482,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3504,7 +3504,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3515,7 +3515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3538,7 +3538,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3549,7 +3549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3573,7 +3573,7 @@ ret %1 } -declare 
{,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3584,7 +3584,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3608,7 +3608,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3619,7 +3619,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3643,7 +3643,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3654,7 +3654,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3678,7 +3678,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3689,7 +3689,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3714,7 +3714,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3725,7 +3725,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3750,7 +3750,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64, i64) define 
@test_vloxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3761,7 +3761,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3786,7 +3786,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3797,7 +3797,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3822,7 +3822,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3833,7 +3833,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3859,7 +3859,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3870,7 +3870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3896,7 +3896,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3907,7 +3907,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3933,7 +3933,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3944,7 +3944,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3970,7 +3970,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -3981,7 +3981,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4000,7 +4000,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4011,7 +4011,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4030,7 +4030,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4041,7 +4041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4060,7 +4060,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4071,7 +4071,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4090,7 +4090,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4101,7 +4101,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4122,7 +4122,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(,,, i16*, , i64) declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4133,7 +4133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4154,7 +4154,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4165,7 +4165,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4186,7 +4186,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4197,7 +4197,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4218,7 +4218,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4229,7 +4229,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4251,7 +4251,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4262,7 +4262,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4284,7 +4284,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4295,7 +4295,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, i16* %base, 
%index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4317,7 +4317,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4328,7 +4328,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4350,7 +4350,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4361,7 +4361,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4384,7 +4384,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4395,7 +4395,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4418,7 +4418,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4429,7 +4429,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4452,7 +4452,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4463,7 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4486,7 +4486,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64, 
i64) define @test_vloxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4497,7 +4497,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4521,7 +4521,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4532,7 +4532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4556,7 +4556,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4567,7 +4567,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4591,7 +4591,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4602,7 +4602,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4626,7 +4626,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4637,7 +4637,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4662,7 +4662,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4673,7 +4673,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4698,7 +4698,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4709,7 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4734,7 +4734,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4745,7 +4745,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4770,7 +4770,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4781,7 +4781,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4807,7 +4807,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4818,7 +4818,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4844,7 +4844,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4855,7 +4855,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4881,7 +4881,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4892,7 +4892,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4918,7 +4918,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i64, i64) define @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -4929,7 +4929,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4948,7 +4948,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -4959,7 +4959,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4978,7 +4978,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -4989,7 +4989,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5008,7 +5008,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5019,7 +5019,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5038,7 +5038,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64, i64) define 
@test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5070,7 +5070,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5081,7 +5081,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5102,7 +5102,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5113,7 +5113,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5134,7 +5134,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64, i64) define @test_vloxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5145,7 +5145,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5165,7 +5165,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5176,7 +5176,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5198,7 +5198,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5209,7 +5209,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5231,7 +5231,7 @@ ret %1 } -declare 
{,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5242,7 +5242,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5264,7 +5264,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64, i64) define @test_vloxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5275,7 +5275,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5297,7 +5297,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5308,7 +5308,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5331,7 +5331,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5342,7 +5342,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5365,7 +5365,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5376,7 +5376,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5399,7 +5399,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64, i64) define @test_vloxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5410,7 +5410,7 @@ ; CHECK-NEXT: 
vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5433,7 +5433,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5444,7 +5444,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5468,7 +5468,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5479,7 +5479,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5503,7 +5503,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5514,7 +5514,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5538,7 +5538,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64, i64) define @test_vloxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5549,7 +5549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5573,7 +5573,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5584,7 +5584,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32( undef, undef, undef, 
undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5609,7 +5609,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5620,7 +5620,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5645,7 +5645,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5656,7 +5656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5681,7 +5681,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64, i64) define @test_vloxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5692,7 +5692,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5717,7 +5717,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5728,7 +5728,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5754,7 +5754,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5765,7 +5765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5791,7 
+5791,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5802,7 +5802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5828,7 +5828,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64, i64) define @test_vloxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5839,7 +5839,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5865,7 +5865,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -5876,7 +5876,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5895,7 +5895,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -5906,7 +5906,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5925,7 +5925,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -5936,7 +5936,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5955,7 +5955,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -5966,7 +5966,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5985,7 +5985,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -5996,7 +5996,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6016,7 +6016,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6027,7 +6027,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6048,7 +6048,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6059,7 +6059,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6079,7 +6079,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6090,7 +6090,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6110,7 +6110,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6121,7 +6121,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6143,7 +6143,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64, i64) define 
@test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6154,7 +6154,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6176,7 +6176,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6187,7 +6187,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6208,7 +6208,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6219,7 +6219,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6240,7 +6240,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6274,7 +6274,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6285,7 +6285,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6308,7 +6308,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6319,7 +6319,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ 
-6341,7 +6341,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6352,7 +6352,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6374,7 +6374,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6385,7 +6385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6409,7 +6409,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6420,7 +6420,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6444,7 +6444,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6455,7 +6455,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6478,7 +6478,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6489,7 +6489,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6513,7 +6513,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64, i64) define 
@test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6524,7 +6524,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6549,7 +6549,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6560,7 +6560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6585,7 +6585,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6596,7 +6596,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6620,7 +6620,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6631,7 +6631,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6656,7 +6656,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6667,7 +6667,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6693,7 +6693,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6704,7 +6704,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6730,7 +6730,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6741,7 +6741,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6766,7 +6766,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6777,7 +6777,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6803,7 +6803,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { @@ -6814,7 +6814,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6833,7 +6833,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { @@ -6844,7 +6844,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6863,7 +6863,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { @@ -6874,7 +6874,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6893,7 +6893,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64*, , i64) 
+declare {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64, i64) define @test_vloxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { @@ -6904,7 +6904,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6923,7 +6923,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -6934,7 +6934,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6953,7 +6953,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -6964,7 +6964,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6983,7 +6983,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -6994,7 +6994,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7013,7 +7013,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7024,7 +7024,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7043,7 +7043,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7054,7 +7054,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} 
%0, 1 ret %1 } @@ -7074,7 +7074,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7085,7 +7085,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7106,7 +7106,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7117,7 +7117,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7137,7 +7137,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7148,7 +7148,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7169,7 +7169,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7180,7 +7180,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7202,7 +7202,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7213,7 +7213,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7235,7 +7235,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7246,7 +7246,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7267,7 +7267,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7278,7 +7278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7300,7 +7300,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7311,7 +7311,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7334,7 +7334,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7345,7 +7345,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7368,7 +7368,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7379,7 +7379,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7401,7 +7401,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7412,7 +7412,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7435,7 +7435,7 @@ ret %1 } -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7446,7 +7446,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7470,7 +7470,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7481,7 +7481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7505,7 +7505,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7516,7 +7516,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7540,7 +7540,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7551,7 +7551,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7575,7 +7575,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7586,7 +7586,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7611,7 +7611,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , 
i64, i64) define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7622,7 +7622,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7647,7 +7647,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7658,7 +7658,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7683,7 +7683,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7694,7 +7694,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7719,7 +7719,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7730,7 +7730,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7756,7 +7756,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7767,7 +7767,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7793,7 +7793,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7804,7 +7804,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7830,7 +7830,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7841,7 +7841,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7867,7 +7867,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -7878,7 +7878,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7897,7 +7897,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -7908,7 +7908,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7927,7 +7927,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -7938,7 +7938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7957,7 +7957,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -7968,7 +7968,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7987,7 +7987,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8*, , i64) +declare {,,} 
@llvm.riscv.vloxseg3.nxv1i8.nxv1i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -7998,7 +7998,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8019,7 +8019,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8030,7 +8030,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8051,7 +8051,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8062,7 +8062,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8083,7 +8083,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8094,7 +8094,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8115,7 +8115,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8126,7 +8126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8148,7 +8148,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8159,7 +8159,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = 
extractvalue {,,,} %0, 1 ret %1 } @@ -8181,7 +8181,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8192,7 +8192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8214,7 +8214,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8225,7 +8225,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8247,7 +8247,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8258,7 +8258,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8281,7 +8281,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8292,7 +8292,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8315,7 +8315,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8326,7 +8326,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8349,7 +8349,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8360,7 +8360,7 @@ ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8383,7 +8383,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8394,7 +8394,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8418,7 +8418,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8429,7 +8429,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8453,7 +8453,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8464,7 +8464,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8488,7 +8488,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8499,7 +8499,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8523,7 +8523,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8534,7 +8534,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = 
extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8559,7 +8559,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8570,7 +8570,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8595,7 +8595,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8606,7 +8606,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8631,7 +8631,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8642,7 +8642,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8667,7 +8667,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8678,7 +8678,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8704,7 +8704,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8715,7 +8715,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8741,7 +8741,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8752,7 +8752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8778,7 +8778,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8789,7 +8789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8815,7 +8815,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -8826,7 +8826,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8845,7 +8845,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -8856,7 +8856,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8875,7 +8875,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -8886,7 +8886,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8905,7 +8905,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64, i64) define @test_vloxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -8916,7 +8916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i8.nxv2i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8935,7 +8935,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -8946,7 +8946,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8967,7 +8967,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -8978,7 +8978,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8999,7 +8999,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9010,7 +9010,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9031,7 +9031,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64, i64) define @test_vloxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9042,7 +9042,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9062,7 +9062,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9073,7 +9073,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9095,7 +9095,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9106,7 +9106,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9128,7 +9128,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9139,7 +9139,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9161,7 +9161,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64, i64) define @test_vloxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9172,7 +9172,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9194,7 +9194,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9205,7 +9205,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9228,7 +9228,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9239,7 +9239,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9262,7 +9262,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9273,7 +9273,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9296,7 +9296,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,} 
@llvm.riscv.vloxseg5.nxv2i8.nxv2i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64, i64) define @test_vloxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9307,7 +9307,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9330,7 +9330,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9341,7 +9341,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9365,7 +9365,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9376,7 +9376,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9400,7 +9400,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9411,7 +9411,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9435,7 +9435,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64, i64) define @test_vloxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9446,7 +9446,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9470,7 +9470,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9481,7 +9481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9506,7 +9506,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9517,7 +9517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9542,7 +9542,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9553,7 +9553,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9578,7 +9578,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64, i64) define @test_vloxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9589,7 +9589,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9614,7 +9614,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9625,7 +9625,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9651,7 +9651,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9662,7 +9662,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, 
undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9688,7 +9688,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9725,7 +9725,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64, i64) define @test_vloxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9736,7 +9736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9762,7 +9762,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { @@ -9773,7 +9773,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9792,7 +9792,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { @@ -9803,7 +9803,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9822,7 +9822,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64, i64) define @test_vloxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { @@ -9833,7 +9833,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9852,7 +9852,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(,, i32*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64, i64) define 
@test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { @@ -9863,7 +9863,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9882,7 +9882,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64, i64) define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { @@ -9893,7 +9893,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9912,7 +9912,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(,, i8*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64, i64) define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { @@ -9923,7 +9923,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9942,7 +9942,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64, i64) define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -9953,7 +9953,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9972,7 +9972,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64, i64) define @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -9983,7 +9983,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10002,7 +10002,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(,, i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64, i64) define @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10013,7 +10013,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10032,7 +10032,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(,, 
i16*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(,, i16*, , , i64, i64) define @test_vloxseg2_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10043,7 +10043,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10062,7 +10062,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10073,7 +10073,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10094,7 +10094,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10105,7 +10105,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10126,7 +10126,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10137,7 +10137,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10158,7 +10158,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64, i64) define @test_vloxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10169,7 +10169,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10189,7 +10189,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10200,7 +10200,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, i16* 
%base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10222,7 +10222,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10233,7 +10233,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10255,7 +10255,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10266,7 +10266,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10288,7 +10288,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64, i64) define @test_vloxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10299,7 +10299,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10321,7 +10321,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10332,7 +10332,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10355,7 +10355,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10366,7 +10366,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10389,7 +10389,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64, i64) 
define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10400,7 +10400,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10423,7 +10423,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64, i64) define @test_vloxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10434,7 +10434,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10457,7 +10457,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10468,7 +10468,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10492,7 +10492,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10503,7 +10503,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10527,7 +10527,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10538,7 +10538,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10562,7 +10562,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64, i64) define @test_vloxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10573,7 +10573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16* 
%base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10597,7 +10597,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10608,7 +10608,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10633,7 +10633,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10644,7 +10644,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10669,7 +10669,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10680,7 +10680,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10705,7 +10705,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64, i64) define @test_vloxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10716,7 +10716,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10741,7 +10741,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10752,7 +10752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, 
undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10778,7 +10778,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10789,7 +10789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10815,7 +10815,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10826,7 +10826,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10852,7 +10852,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64, i64) define @test_vloxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10863,7 +10863,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10889,7 +10889,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -10900,7 +10900,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10919,7 +10919,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -10930,7 +10930,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10949,7 +10949,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64*, , i64) +declare {,} 
@llvm.riscv.vloxseg2.nxv2i64.nxv2i16(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -10960,7 +10960,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10979,7 +10979,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(,, i64*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64, i64) define @test_vloxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -10990,7 +10990,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11009,7 +11009,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -11020,7 +11020,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11041,7 +11041,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -11052,7 +11052,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11073,7 +11073,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -11084,7 +11084,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11105,7 +11105,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(,,, i64*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64, i64) define @test_vloxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -11116,7 +11116,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64( undef, undef, 
undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11137,7 +11137,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -11148,7 +11148,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11170,7 +11170,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -11181,7 +11181,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11203,7 +11203,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -11214,7 +11214,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11236,7 +11236,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64, i64) define @test_vloxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -11247,7 +11247,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11269,7 +11269,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { @@ -11280,7 +11280,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11299,7 +11299,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64, i64) define 
@test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { @@ -11310,7 +11310,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11329,7 +11329,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { @@ -11340,7 +11340,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11359,7 +11359,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { @@ -11370,7 +11370,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11389,7 +11389,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { @@ -11400,7 +11400,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11419,7 +11419,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { @@ -11430,7 +11430,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11449,7 +11449,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { @@ -11460,7 +11460,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11479,7 +11479,7 @@ ret %1 } 
-declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11490,7 +11490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11509,7 +11509,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11520,7 +11520,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11539,7 +11539,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11550,7 +11550,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11569,7 +11569,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11580,7 +11580,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11599,7 +11599,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11610,7 +11610,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11631,7 +11631,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11642,7 +11642,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11663,7 +11663,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11674,7 +11674,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11695,7 +11695,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64, i64) define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11706,7 +11706,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11727,7 +11727,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11738,7 +11738,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11760,7 +11760,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11771,7 +11771,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11793,7 +11793,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11804,7 +11804,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11826,7 +11826,7 @@ ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11837,7 +11837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11859,7 +11859,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11870,7 +11870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11893,7 +11893,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11904,7 +11904,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11927,7 +11927,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11938,7 +11938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11961,7 +11961,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64, i64) define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11972,7 +11972,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11995,7 +11995,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64(,,,,,, 
double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12006,7 +12006,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12030,7 +12030,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -12041,7 +12041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12065,7 +12065,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -12076,7 +12076,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12100,7 +12100,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64, i64) define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12111,7 +12111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12135,7 +12135,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12146,7 +12146,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12171,7 +12171,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, 
i64 %vl) { @@ -12182,7 +12182,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12207,7 +12207,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -12218,7 +12218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12243,7 +12243,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64, i64) define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12254,7 +12254,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12279,7 +12279,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12290,7 +12290,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12316,7 +12316,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -12327,7 +12327,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12353,7 +12353,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i16(double* 
%base, %index, i64 %vl) { @@ -12364,7 +12364,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12390,7 +12390,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64, i64) define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12401,7 +12401,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12427,7 +12427,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12438,7 +12438,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12457,7 +12457,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12468,7 +12468,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12487,7 +12487,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12498,7 +12498,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12517,7 +12517,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12528,7 +12528,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64( undef, undef, float* 
%base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12547,7 +12547,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12558,7 +12558,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12579,7 +12579,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12590,7 +12590,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12611,7 +12611,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12622,7 +12622,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12643,7 +12643,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12654,7 +12654,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12674,7 +12674,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12685,7 +12685,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12707,7 +12707,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64, i64) define 
@test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12718,7 +12718,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12740,7 +12740,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12751,7 +12751,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12773,7 +12773,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12784,7 +12784,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12806,7 +12806,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12817,7 +12817,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12840,7 +12840,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12851,7 +12851,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12874,7 +12874,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12885,7 +12885,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call 
{,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12908,7 +12908,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12919,7 +12919,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12942,7 +12942,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12953,7 +12953,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12977,7 +12977,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12988,7 +12988,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13012,7 +13012,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13023,7 +13023,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13047,7 +13047,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13058,7 +13058,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } 
@@ -13082,7 +13082,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -13093,7 +13093,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13118,7 +13118,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -13129,7 +13129,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13154,7 +13154,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13165,7 +13165,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13190,7 +13190,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13201,7 +13201,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13226,7 +13226,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -13237,7 +13237,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13263,7 +13263,7 @@ ret %1 } -declare 
{,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -13274,7 +13274,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13300,7 +13300,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13311,7 +13311,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13337,7 +13337,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13348,7 +13348,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13374,7 +13374,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13385,7 +13385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13404,7 +13404,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13415,7 +13415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13434,7 +13434,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(,, half*, , i64) declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13445,7 +13445,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13464,7 +13464,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13475,7 +13475,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13494,7 +13494,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13505,7 +13505,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13526,7 +13526,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13537,7 +13537,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13558,7 +13558,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13569,7 +13569,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13590,7 +13590,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13601,7 +13601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = 
extractvalue {,,} %0, 1 ret %1 } @@ -13622,7 +13622,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13633,7 +13633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13655,7 +13655,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13666,7 +13666,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13688,7 +13688,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13699,7 +13699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13721,7 +13721,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13732,7 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13754,7 +13754,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13765,7 +13765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13788,7 +13788,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64, i64) define 
@test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13799,7 +13799,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13822,7 +13822,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13833,7 +13833,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13856,7 +13856,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13867,7 +13867,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13890,7 +13890,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13901,7 +13901,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13925,7 +13925,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13936,7 +13936,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13960,7 +13960,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13971,7 +13971,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13995,7 +13995,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14006,7 +14006,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14030,7 +14030,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -14041,7 +14041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14066,7 +14066,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -14077,7 +14077,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14102,7 +14102,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -14113,7 +14113,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14138,7 +14138,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14149,7 +14149,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14174,7 +14174,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -14185,7 +14185,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14211,7 +14211,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -14222,7 +14222,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14248,7 +14248,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -14259,7 +14259,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14285,7 +14285,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14296,7 +14296,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14322,7 +14322,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14333,7 +14333,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64( undef, 
undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14352,7 +14352,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14363,7 +14363,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14382,7 +14382,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14393,7 +14393,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14412,7 +14412,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14423,7 +14423,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14442,7 +14442,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14453,7 +14453,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14474,7 +14474,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14485,7 +14485,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14506,7 +14506,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ 
-14517,7 +14517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14538,7 +14538,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14549,7 +14549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14570,7 +14570,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14581,7 +14581,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14603,7 +14603,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14614,7 +14614,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14636,7 +14636,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14647,7 +14647,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14669,7 +14669,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14680,7 +14680,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 
ret %1 } @@ -14702,7 +14702,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14713,7 +14713,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14736,7 +14736,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14747,7 +14747,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14770,7 +14770,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14781,7 +14781,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14804,7 +14804,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64, i64) define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14815,7 +14815,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14838,7 +14838,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14849,7 +14849,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14873,7 +14873,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i64) declare 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14884,7 +14884,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14908,7 +14908,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14919,7 +14919,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14943,7 +14943,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64, i64) define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14954,7 +14954,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14978,7 +14978,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14989,7 +14989,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15014,7 +15014,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -15025,7 +15025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15050,7 +15050,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64, i64) define 
@test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -15061,7 +15061,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15086,7 +15086,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64, i64) define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -15097,7 +15097,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15122,7 +15122,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -15133,7 +15133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15159,7 +15159,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -15170,7 +15170,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15196,7 +15196,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64, i64) define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -15207,7 +15207,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15233,7 +15233,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64, i64) define 
@test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -15244,7 +15244,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15270,7 +15270,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15281,7 +15281,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15300,7 +15300,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15311,7 +15311,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15330,7 +15330,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15341,7 +15341,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15360,7 +15360,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15371,7 +15371,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15390,7 +15390,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15401,7 +15401,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15422,7 
+15422,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15433,7 +15433,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15454,7 +15454,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15465,7 +15465,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15485,7 +15485,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15496,7 +15496,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15516,7 +15516,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15527,7 +15527,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15549,7 +15549,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15560,7 +15560,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15582,7 +15582,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15593,7 +15593,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15614,7 +15614,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15625,7 +15625,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15647,7 +15647,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { @@ -15658,7 +15658,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15677,7 +15677,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { @@ -15688,7 +15688,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15707,7 +15707,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { @@ -15718,7 +15718,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15737,7 +15737,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { @@ -15748,7 +15748,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15767,7 +15767,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i64) +declare {,} 
@llvm.riscv.vloxseg2.nxv2f64.nxv2i32(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -15778,7 +15778,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15797,7 +15797,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -15808,7 +15808,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15827,7 +15827,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -15838,7 +15838,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15857,7 +15857,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(,, double*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64, i64) define @test_vloxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -15868,7 +15868,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15887,7 +15887,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -15898,7 +15898,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15919,7 +15919,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -15930,7 +15930,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail 
call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15951,7 +15951,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -15962,7 +15962,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15983,7 +15983,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(,,, double*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64, i64) define @test_vloxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -15994,7 +15994,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16015,7 +16015,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -16026,7 +16026,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16048,7 +16048,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -16059,7 +16059,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16081,7 +16081,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -16092,7 +16092,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16114,7 +16114,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double*, , i64) +declare {,,,} 
@llvm.riscv.vloxseg4.nxv2f64.nxv2i64(,,,, double*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64, i64) define @test_vloxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -16125,7 +16125,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16147,7 +16147,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16158,7 +16158,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16177,7 +16177,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16188,7 +16188,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16207,7 +16207,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16218,7 +16218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16237,7 +16237,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16248,7 +16248,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16267,7 +16267,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16278,7 +16278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv4f16.nxv4i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16298,7 +16298,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16309,7 +16309,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16330,7 +16330,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16341,7 +16341,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16361,7 +16361,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16372,7 +16372,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16393,7 +16393,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16404,7 +16404,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16426,7 +16426,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16437,7 +16437,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16459,7 +16459,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , 
, i64, i64) define @test_vloxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16470,7 +16470,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16491,7 +16491,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16502,7 +16502,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16524,7 +16524,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16535,7 +16535,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16558,7 +16558,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16569,7 +16569,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16592,7 +16592,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16603,7 +16603,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16625,7 +16625,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16636,7 +16636,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail 
call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16659,7 +16659,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16670,7 +16670,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16694,7 +16694,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16705,7 +16705,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16729,7 +16729,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16740,7 +16740,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16764,7 +16764,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16775,7 +16775,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16799,7 +16799,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16810,7 +16810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret 
%1 } @@ -16835,7 +16835,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16846,7 +16846,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16871,7 +16871,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16882,7 +16882,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16907,7 +16907,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16918,7 +16918,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16943,7 +16943,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16954,7 +16954,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -16980,7 +16980,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16991,7 +16991,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17017,7 +17017,7 @@ ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -17028,7 +17028,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17054,7 +17054,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -17065,7 +17065,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17091,7 +17091,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17102,7 +17102,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17121,7 +17121,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17132,7 +17132,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17151,7 +17151,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17162,7 +17162,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17181,7 +17181,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(,, half*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64, i64) define @test_vloxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17192,7 
+17192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17211,7 +17211,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17222,7 +17222,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17243,7 +17243,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17254,7 +17254,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17275,7 +17275,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17286,7 +17286,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17307,7 +17307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(,,, half*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64, i64) define @test_vloxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17318,7 +17318,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17338,7 +17338,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17349,7 +17349,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17371,7 +17371,7 @@ ret %1 } -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17382,7 +17382,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17404,7 +17404,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17415,7 +17415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17437,7 +17437,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64, i64) define @test_vloxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17448,7 +17448,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17470,7 +17470,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17481,7 +17481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17504,7 +17504,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17515,7 +17515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17538,7 +17538,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17549,7 
+17549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17572,7 +17572,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64, i64) define @test_vloxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17583,7 +17583,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17606,7 +17606,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17617,7 +17617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17641,7 +17641,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17652,7 +17652,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17676,7 +17676,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17687,7 +17687,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17711,7 +17711,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64, i64) define @test_vloxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17722,7 +17722,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17746,7 +17746,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17757,7 +17757,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17782,7 +17782,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17793,7 +17793,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17818,7 +17818,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17829,7 +17829,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17854,7 +17854,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64, i64) define @test_vloxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17865,7 +17865,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17890,7 +17890,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17901,7 +17901,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, 
undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17927,7 +17927,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17938,7 +17938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17964,7 +17964,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17975,7 +17975,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -18001,7 +18001,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64, i64) define @test_vloxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -18012,7 +18012,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -18038,7 +18038,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { @@ -18049,7 +18049,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -18068,7 +18068,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { @@ -18079,7 +18079,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -18098,7 +18098,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float*, , i64) 
+declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { @@ -18109,7 +18109,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -18128,7 +18128,7 @@ ret %1 } -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i64) +declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(,, float*, , i64) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64, i64) define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { @@ -18139,7 +18139,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -18158,7 +18158,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { @@ -18169,7 +18169,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -18190,7 +18190,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { @@ -18201,7 +18201,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -18222,7 +18222,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { @@ -18233,7 +18233,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -18253,7 +18253,7 @@ ret %1 } -declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i64) +declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(,,, float*, , i64) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64, i64) define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { @@ -18264,7 +18264,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 
%vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -18285,7 +18285,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { @@ -18296,7 +18296,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -18318,7 +18318,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { @@ -18329,7 +18329,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -18351,7 +18351,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { @@ -18362,7 +18362,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -18384,7 +18384,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i64) +declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(,,,, float*, , i64) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64, i64) define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { @@ -18395,7 +18395,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.nxv16i16(,, i16* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i32, i32) define @test_vlseg2_nxv16i16(i16* %base, i32 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vlseg2.nxv16i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -28,14 +28,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i32, i32) define @test_vlseg2_nxv1i8(i8* %base, i32 %vl) { @@ -46,7 +46,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -61,14 +61,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1i8(,,, i8* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv1i8(i8* %base, i32 %vl) { @@ -79,7 +79,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -95,14 +95,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1i8(,,,, i8* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv1i8(i8* %base, i32 %vl) { @@ -113,7 +113,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -130,14 +130,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(,,,,, i8* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i32, i32) define 
@test_vlseg5_nxv1i8(i8* %base, i32 %vl) { @@ -148,7 +148,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -166,14 +166,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(,,,,,, i8* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv1i8(i8* %base, i32 %vl) { @@ -184,7 +184,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -203,14 +203,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(,,,,,,, i8* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv1i8(i8* %base, i32 %vl) { @@ -221,7 +221,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -241,14 +241,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(,,,,,,,, i8* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv1i8(i8* %base, i32 %vl) { @@ -259,7 +259,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, 
undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -280,14 +280,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv16i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i32, i32) define @test_vlseg2_nxv16i8(i8* %base, i32 %vl) { @@ -298,7 +298,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -313,14 +313,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv16i8(,,, i8* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv16i8(i8* %base, i32 %vl) { @@ -331,7 +331,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -347,14 +347,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv16i8(,,,, i8* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv16i8(i8* %base, i32 %vl) { @@ -365,7 +365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -382,14 +382,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} 
@llvm.riscv.vlseg2.nxv2i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2i32(,, i32* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i32, i32) define @test_vlseg2_nxv2i32(i32* %base, i32 %vl) { @@ -400,7 +400,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -415,14 +415,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2i32(,,, i32* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv2i32(i32* %base, i32 %vl) { @@ -433,7 +433,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -449,14 +449,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2i32(,,,, i32* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv2i32(i32* %base, i32 %vl) { @@ -467,7 +467,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -484,14 +484,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(,,,,, i32* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i32, i32) define @test_vlseg5_nxv2i32(i32* %base, i32 %vl) { @@ -502,7 +502,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -520,14 +520,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(,,,,,, i32* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i32, i32) define @test_vlseg6_nxv2i32(i32* %base, i32 %vl) { @@ -538,7 +538,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -557,14 +557,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(,,,,,,, i32* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7_nxv2i32(i32* %base, i32 %vl) { @@ -575,7 +575,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -595,14 +595,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(,,,,,,,, i32* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8_nxv2i32(i32* %base, i32 %vl) { @@ -613,7 +613,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -634,14 +634,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %vl) %1 = 
extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4i16(,, i16* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i32, i32) define @test_vlseg2_nxv4i16(i16* %base, i32 %vl) { @@ -652,7 +652,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -667,14 +667,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv4i16(,,, i16* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv4i16(i16* %base, i32 %vl) { @@ -685,7 +685,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -701,14 +701,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv4i16(,,,, i16* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv4i16(i16* %base, i32 %vl) { @@ -719,7 +719,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -736,14 +736,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(,,,,, i16* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv4i16(i16* %base, i32 %vl) { @@ -754,7 +754,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -772,14 +772,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(,,,,,, i16* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv4i16(i16* %base, i32 %vl) { @@ -790,7 +790,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -809,14 +809,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(,,,,,,, i16* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv4i16(i16* %base, i32 %vl) { @@ -827,7 +827,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -847,14 +847,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(,,,,,,,, i16* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv4i16(i16* %base, i32 %vl) { @@ -865,7 +865,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -886,14 +886,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: 
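; NOTE (editor sketch, not part of the patch): with the stripped vector types restored from the
; intrinsic name, the updated unmasked call in the vlseg8.nxv4i16 tests would look roughly like:
;   %0 = tail call {<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>}
;        @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, i16* %base, i32 %vl)
; i.e. one undef passthru per returned field, prepended ahead of the pointer and vl operands.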
- %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1i32(,, i32* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i32, i32) define @test_vlseg2_nxv1i32(i32* %base, i32 %vl) { @@ -904,7 +904,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -919,14 +919,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i32(i32* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1i32(,,, i32* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv1i32(i32* %base, i32 %vl) { @@ -937,7 +937,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -953,14 +953,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1i32(,,,, i32* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv1i32(i32* %base, i32 %vl) { @@ -971,7 +971,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -988,14 +988,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(,,,,, i32* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i32, i32) define @test_vlseg5_nxv1i32(i32* %base, i32 %vl) { @@ 
-1006,7 +1006,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1024,14 +1024,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(,,,,,, i32* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i32, i32) define @test_vlseg6_nxv1i32(i32* %base, i32 %vl) { @@ -1042,7 +1042,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1061,14 +1061,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(,,,,,,, i32* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7_nxv1i32(i32* %base, i32 %vl) { @@ -1079,7 +1079,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1099,14 +1099,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(,,,,,,,, i32* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8_nxv1i32(i32* %base, i32 %vl) { @@ -1117,7 +1117,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, 
undef , undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1138,14 +1138,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8i16(,, i16* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i32, i32) define @test_vlseg2_nxv8i16(i16* %base, i32 %vl) { @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1171,14 +1171,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv8i16(,,, i16* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv8i16(i16* %base, i32 %vl) { @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1205,14 +1205,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv8i16(,,,, i16* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv8i16(i16* %base, i32 %vl) { @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1240,14 +1240,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i32 
%vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i32, i32) define @test_vlseg2_nxv8i8(i8* %base, i32 %vl) { @@ -1258,7 +1258,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1273,14 +1273,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv8i8(,,, i8* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv8i8(i8* %base, i32 %vl) { @@ -1291,7 +1291,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1307,14 +1307,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv8i8(,,,, i8* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv8i8(i8* %base, i32 %vl) { @@ -1325,7 +1325,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1342,14 +1342,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(,,,,, i8* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv8i8(i8* %base, i32 %vl) { @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1378,14 +1378,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 
; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(,,,,,, i8* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv8i8(i8* %base, i32 %vl) { @@ -1396,7 +1396,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1415,14 +1415,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(,,,,,,, i8* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv8i8(i8* %base, i32 %vl) { @@ -1433,7 +1433,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1453,14 +1453,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(,,,,,,,, i8* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv8i8(i8* %base, i32 %vl) { @@ -1471,7 +1471,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1492,14 +1492,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail 
call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8i32(,, i32* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i32, i32) define @test_vlseg2_nxv8i32(i32* %base, i32 %vl) { @@ -1510,7 +1510,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1525,14 +1525,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i32, i32) define @test_vlseg2_nxv4i8(i8* %base, i32 %vl) { @@ -1543,7 +1543,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1558,14 +1558,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv4i8(,,, i8* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv4i8(i8* %base, i32 %vl) { @@ -1576,7 +1576,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1592,14 +1592,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv4i8(,,,, i8* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv4i8(i8* %base, i32 %vl) { @@ -1610,7 +1610,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1627,14 +1627,14 @@ ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(,,,,, i8* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv4i8(i8* %base, i32 %vl) { @@ -1645,7 +1645,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1663,14 +1663,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(,,,,,, i8* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv4i8(i8* %base, i32 %vl) { @@ -1681,7 +1681,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1700,14 +1700,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(,,,,,,, i8* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv4i8(i8* %base, i32 %vl) { @@ -1718,7 +1718,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1738,14 +1738,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 
%vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(,,,,,,,, i8* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv4i8(i8* %base, i32 %vl) { @@ -1756,7 +1756,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1777,14 +1777,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1i16(,, i16* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i32, i32) define @test_vlseg2_nxv1i16(i16* %base, i32 %vl) { @@ -1795,7 +1795,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1810,14 +1810,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1i16(,,, i16* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv1i16(i16* %base, i32 %vl) { @@ -1828,7 +1828,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1844,14 +1844,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1i16(,,,, i16* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv1i16(i16* %base, i32 %vl) { @@ -1862,7 +1862,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1879,14 +1879,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(,,,,, i16* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv1i16(i16* %base, i32 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1915,14 +1915,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(,,,,,, i16* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv1i16(i16* %base, i32 %vl) { @@ -1933,7 +1933,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1952,14 +1952,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(,,,,,,, i16* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv1i16(i16* %base, i32 %vl) { @@ -1970,7 +1970,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1990,14 +1990,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(,,,,,,,, i16* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv1i16(i16* %base, i32 %vl) { @@ -2008,7 +2008,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2029,14 +2029,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv32i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i32, i32) define @test_vlseg2_nxv32i8(i8* %base, i32 %vl) { @@ -2047,7 +2047,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2062,14 +2062,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2i8(,, i8* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i32, i32) define @test_vlseg2_nxv2i8(i8* %base, i32 %vl) { @@ -2080,7 +2080,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2095,14 +2095,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2i8(,,, i8* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i32, i32) define @test_vlseg3_nxv2i8(i8* %base, i32 %vl) { @@ -2113,7 +2113,7 
@@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2129,14 +2129,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2i8(,,,, i8* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i32, i32) define @test_vlseg4_nxv2i8(i8* %base, i32 %vl) { @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2164,14 +2164,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(,,,,, i8* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i32, i32) define @test_vlseg5_nxv2i8(i8* %base, i32 %vl) { @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2200,14 +2200,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(,,,,,, i8* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i32, i32) define @test_vlseg6_nxv2i8(i8* %base, i32 %vl) { @@ -2218,7 +2218,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2237,14 +2237,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( 
undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(,,,,,,, i8* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7_nxv2i8(i8* %base, i32 %vl) { @@ -2255,7 +2255,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2275,14 +2275,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(,,,,,,,, i8* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8_nxv2i8(i8* %base, i32 %vl) { @@ -2293,7 +2293,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2314,14 +2314,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2i16(,, i16* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i32, i32) define @test_vlseg2_nxv2i16(i16* %base, i32 %vl) { @@ -2332,7 +2332,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2347,14 +2347,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2i16(,,, i16* , i32) 
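For readability, a minimal sketch of one of the updated declarations and calls with the scalable-vector types written out explicitly; it assumes nxv2i16 denotes <vscale x 2 x i16> and uses a hypothetical function name, but otherwise mirrors the test_vlseg3_nxv2i16 pattern in these hunks: the unmasked segment-load intrinsic now takes one passthru operand per field, passed as undef.

; declaration with one passthru parameter per returned field
declare {<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i16*, i32)

; illustrative caller (hypothetical name), following the unmasked test pattern
define <vscale x 2 x i16> @sketch_vlseg3_nxv2i16(i16* %base, i32 %vl) {
entry:
  ; one undef passthru per field, then the base pointer and vl, as in the updated calls
  %0 = tail call {<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, i16* %base, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>} %0, 1
  ret <vscale x 2 x i16> %1
}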
declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i32, i32) define @test_vlseg3_nxv2i16(i16* %base, i32 %vl) { @@ -2365,7 +2365,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2381,14 +2381,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2i16(,,,, i16* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i32, i32) define @test_vlseg4_nxv2i16(i16* %base, i32 %vl) { @@ -2399,7 +2399,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2416,14 +2416,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(,,,,, i16* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i32, i32) define @test_vlseg5_nxv2i16(i16* %base, i32 %vl) { @@ -2434,7 +2434,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2452,14 +2452,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(,,,,,, i16* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i32, i32) define @test_vlseg6_nxv2i16(i16* %base, i32 %vl) { @@ -2470,7 +2470,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2489,14 +2489,14 @@ ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(,,,,,,, i16* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7_nxv2i16(i16* %base, i32 %vl) { @@ -2507,7 +2507,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2527,14 +2527,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(,,,,,,,, i16* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8_nxv2i16(i16* %base, i32 %vl) { @@ -2545,7 +2545,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2566,14 +2566,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4i32(,, i32* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i32, i32) define @test_vlseg2_nxv4i32(i32* %base, i32 %vl) { @@ -2584,7 +2584,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2599,14 +2599,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,} %0, 
0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, i32* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i32, i32) define @test_vlseg3_nxv4i32(i32* %base, i32 %vl) { @@ -2617,7 +2617,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2633,14 +2633,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv4i32(,,,, i32* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i32, i32) define @test_vlseg4_nxv4i32(i32* %base, i32 %vl) { @@ -2651,7 +2651,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2668,14 +2668,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.nxv16f16(,, half* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i32, i32) define @test_vlseg2_nxv16f16(half* %base, i32 %vl) { @@ -2686,7 +2686,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2701,14 +2701,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4f64(,, double* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i32, i32) define @test_vlseg2_nxv4f64(double* %base, i32 %vl) { @@ -2719,7 +2719,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) + %0 = tail 
call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2734,14 +2734,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1f64(,, double* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i32, i32) define @test_vlseg2_nxv1f64(double* %base, i32 %vl) { @@ -2752,7 +2752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2767,14 +2767,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1f64(,,, double* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i32, i32) define @test_vlseg3_nxv1f64(double* %base, i32 %vl) { @@ -2785,7 +2785,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2801,14 +2801,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1f64(,,,, double* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i32, i32) define @test_vlseg4_nxv1f64(double* %base, i32 %vl) { @@ -2819,7 +2819,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2836,14 +2836,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} 
@llvm.riscv.vlseg5.nxv1f64(double* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(,,,,, double* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i32, i32) define @test_vlseg5_nxv1f64(double* %base, i32 %vl) { @@ -2854,7 +2854,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2872,14 +2872,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(,,,,,, double* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i32, i32) define @test_vlseg6_nxv1f64(double* %base, i32 %vl) { @@ -2890,7 +2890,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2909,14 +2909,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(,,,,,,, double* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i32, i32) define @test_vlseg7_nxv1f64(double* %base, i32 %vl) { @@ -2927,7 +2927,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2947,14 +2947,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(,,,,,,,, double* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i32, i32) define 
@test_vlseg8_nxv1f64(double* %base, i32 %vl) { @@ -2965,7 +2965,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2986,14 +2986,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2f32(,, float* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i32, i32) define @test_vlseg2_nxv2f32(float* %base, i32 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3019,14 +3019,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2f32(,,, float* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i32, i32) define @test_vlseg3_nxv2f32(float* %base, i32 %vl) { @@ -3037,7 +3037,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3053,14 +3053,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2f32(,,,, float* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i32, i32) define @test_vlseg4_nxv2f32(float* %base, i32 %vl) { @@ -3071,7 +3071,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3088,14 +3088,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(,,,,, float* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i32, i32) define @test_vlseg5_nxv2f32(float* %base, i32 %vl) { @@ -3106,7 +3106,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3124,14 +3124,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(,,,,,, float* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i32, i32) define @test_vlseg6_nxv2f32(float* %base, i32 %vl) { @@ -3142,7 +3142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3161,14 +3161,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(,,,,,,, float* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i32, i32) define @test_vlseg7_nxv2f32(float* %base, i32 %vl) { @@ -3179,7 +3179,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3199,14 +3199,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call 
{,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(,,,,,,,, float* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8_nxv2f32(float* %base, i32 %vl) { @@ -3217,7 +3217,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3238,14 +3238,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1f16(,, half* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i32, i32) define @test_vlseg2_nxv1f16(half* %base, i32 %vl) { @@ -3256,7 +3256,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3271,14 +3271,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1f16(,,, half* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i32, i32) define @test_vlseg3_nxv1f16(half* %base, i32 %vl) { @@ -3289,7 +3289,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3305,14 +3305,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1f16(,,,, half* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv1f16(half* %base, i32 %vl) { @@ -3323,7 +3323,7 @@ ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3340,14 +3340,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(,,,,, half* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv1f16(half* %base, i32 %vl) { @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3376,14 +3376,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(,,,,,, half* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv1f16(half* %base, i32 %vl) { @@ -3394,7 +3394,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3413,14 +3413,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(,,,,,,, half* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv1f16(half* %base, i32 %vl) { @@ -3431,7 +3431,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3451,14 +3451,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(,,,,,,,, half* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv1f16(half* %base, i32 %vl) { @@ -3469,7 +3469,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3490,14 +3490,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.nxv1f32(,, float* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i32, i32) define @test_vlseg2_nxv1f32(float* %base, i32 %vl) { @@ -3508,7 +3508,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3523,14 +3523,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv1f32(,,, float* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i32, i32) define @test_vlseg3_nxv1f32(float* %base, i32 %vl) { @@ -3541,7 +3541,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3557,14 +3557,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} 
%2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv1f32(,,,, float* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i32, i32) define @test_vlseg4_nxv1f32(float* %base, i32 %vl) { @@ -3575,7 +3575,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3592,14 +3592,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(,,,,, float* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i32, i32) define @test_vlseg5_nxv1f32(float* %base, i32 %vl) { @@ -3610,7 +3610,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3628,14 +3628,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(,,,,,, float* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i32, i32) define @test_vlseg6_nxv1f32(float* %base, i32 %vl) { @@ -3646,7 +3646,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3665,14 +3665,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(,,,,,,, float* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i32, i32) define @test_vlseg7_nxv1f32(float* %base, i32 %vl) { @@ -3683,7 +3683,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3703,14 +3703,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(,,,,,,,, float* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8_nxv1f32(float* %base, i32 %vl) { @@ -3721,7 +3721,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3742,14 +3742,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8f16(,, half* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i32, i32) define @test_vlseg2_nxv8f16(half* %base, i32 %vl) { @@ -3760,7 +3760,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3775,14 +3775,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv8f16(,,, half* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i32, i32) define @test_vlseg3_nxv8f16(half* %base, i32 %vl) { @@ -3793,7 +3793,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3809,14 +3809,14 @@ ; CHECK-NEXT: # 
kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv8f16(,,,, half* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv8f16(half* %base, i32 %vl) { @@ -3827,7 +3827,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3844,14 +3844,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.nxv8f32(,, float* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i32, i32) define @test_vlseg2_nxv8f32(float* %base, i32 %vl) { @@ -3862,7 +3862,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3877,14 +3877,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2f64(,, double* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i32, i32) define @test_vlseg2_nxv2f64(double* %base, i32 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3910,14 +3910,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f64(double* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2f64(,,, double* , i32) declare {,,} 
@llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i32, i32) define @test_vlseg3_nxv2f64(double* %base, i32 %vl) { @@ -3928,7 +3928,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3944,14 +3944,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f64(double* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2f64(,,,, double* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i32, i32) define @test_vlseg4_nxv2f64(double* %base, i32 %vl) { @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3979,14 +3979,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4f16(,, half* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i32, i32) define @test_vlseg2_nxv4f16(half* %base, i32 %vl) { @@ -3997,7 +3997,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4012,14 +4012,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv4f16(,,, half* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i32, i32) define @test_vlseg3_nxv4f16(half* %base, i32 %vl) { @@ -4030,7 +4030,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4046,14 +4046,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call 
{,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv4f16(,,,, half* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv4f16(half* %base, i32 %vl) { @@ -4064,7 +4064,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4081,14 +4081,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(,,,,, half* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv4f16(half* %base, i32 %vl) { @@ -4099,7 +4099,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4117,14 +4117,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(,,,,,, half* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv4f16(half* %base, i32 %vl) { @@ -4135,7 +4135,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4154,14 +4154,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i32) +declare 
{,,,,,,} @llvm.riscv.vlseg7.nxv4f16(,,,,,,, half* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv4f16(half* %base, i32 %vl) { @@ -4172,7 +4172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4192,14 +4192,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(,,,,,,,, half* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv4f16(half* %base, i32 %vl) { @@ -4210,7 +4210,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4231,14 +4231,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f16(half* , i32) +declare {,} @llvm.riscv.vlseg2.nxv2f16(,, half* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i32, i32) define @test_vlseg2_nxv2f16(half* %base, i32 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4264,14 +4264,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f16(half* , i32) +declare {,,} @llvm.riscv.vlseg3.nxv2f16(,,, half* , i32) declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i32, i32) define @test_vlseg3_nxv2f16(half* %base, i32 %vl) { @@ -4282,7 +4282,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4298,14 +4298,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f16(half* , i32) +declare {,,,} @llvm.riscv.vlseg4.nxv2f16(,,,, half* , i32) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i32, i32) define @test_vlseg4_nxv2f16(half* %base, i32 %vl) { @@ -4316,7 +4316,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4333,14 +4333,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* , i32) +declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(,,,,, half* , i32) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i32, i32) define @test_vlseg5_nxv2f16(half* %base, i32 %vl) { @@ -4351,7 +4351,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4369,14 +4369,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* , i32) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(,,,,,, half* , i32) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i32, i32) define @test_vlseg6_nxv2f16(half* %base, i32 %vl) { @@ -4387,7 +4387,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4406,14 +4406,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, 
undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* , i32) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(,,,,,,, half* , i32) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i32, i32) define @test_vlseg7_nxv2f16(half* %base, i32 %vl) { @@ -4424,7 +4424,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4444,14 +4444,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* , i32) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(,,,,,,,, half* , i32) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8_nxv2f16(half* %base, i32 %vl) { @@ -4462,7 +4462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4483,14 +4483,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f32(float* , i32) +declare {,} @llvm.riscv.vlseg2.nxv4f32(,, float* , i32) declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i32, i32) define @test_vlseg2_nxv4f32(float* %base, i32 %vl) { @@ -4501,7 +4501,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4516,14 +4516,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} 
@llvm.riscv.vlseg3.nxv4f32(float* , i32)
+declare {,,} @llvm.riscv.vlseg3.nxv4f32(,,, float* , i32)
declare {,,} @llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i32, i32)
define @test_vlseg3_nxv4f32(float* %base, i32 %vl) {
@@ -4534,7 +4534,7 @@
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, float* %base, i32 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
@@ -4550,14 +4550,14 @@
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, float* %base, i32 %vl)
%1 = extractvalue {,,} %0, 0
%2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i32 %vl, i32 1)
%3 = extractvalue {,,} %2, 1
ret %3
}
-declare {,,,} @llvm.riscv.vlseg4.nxv4f32(float* , i32)
+declare {,,,} @llvm.riscv.vlseg4.nxv4f32(,,,, float* , i32)
declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i32, i32)
define @test_vlseg4_nxv4f32(float* %base, i32 %vl) {
@@ -4568,7 +4568,7 @@
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, float* %base, i32 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
@@ -4585,7 +4585,7 @@
; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, float* %base, i32 %vl)
%1 = extractvalue {,,,} %0, 0
%2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i32 %vl, i32 1)
%3 = extractvalue {,,,} %2, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \
; RUN: -verify-machineinstrs < %s | FileCheck %s
-declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i64)
+declare {,} @llvm.riscv.vlseg2.nxv16i16(,, i16* , i64)
declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64, i64)
define @test_vlseg2_nxv16i16(i16* %base, i64 %vl) {
@@ -13,7 +13,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16( undef, undef, i16* %base, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
@@ -28,14 +28,14 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16( undef, undef, i16* %base, i64 %vl)
%1 = extractvalue {,} %0, 0
%2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1)
%3 = extractvalue {,} %2, 1
ret %3
}
-declare {,} @llvm.riscv.vlseg2.nxv4i32(i32* , i64)
+declare {,} @llvm.riscv.vlseg2.nxv4i32(,, i32* , i64)
declare {,} @llvm.riscv.vlseg2.mask.nxv4i32(,, i32*, , i64, i64)
define @test_vlseg2_nxv4i32(i32* %base, i64 %vl) {
@@ -46,7
+46,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -61,14 +61,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4i32(,,, i32* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i32(,,, i32*, , i64, i64) define @test_vlseg3_nxv4i32(i32* %base, i64 %vl) { @@ -79,7 +79,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -95,14 +95,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv4i32(,,,, i32* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i32(,,,, i32*, , i64, i64) define @test_vlseg4_nxv4i32(i32* %base, i64 %vl) { @@ -113,7 +113,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -130,14 +130,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv16i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv16i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv16i8(,, i8*, , i64, i64) define @test_vlseg2_nxv16i8(i8* %base, i64 %vl) { @@ -148,7 +148,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -163,14 +163,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} 
@llvm.riscv.vlseg2.mask.nxv16i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv16i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv16i8(,,, i8* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv16i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv16i8(i8* %base, i64 %vl) { @@ -181,7 +181,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -197,14 +197,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv16i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv16i8(,,,, i8* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv16i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv16i8(i8* %base, i64 %vl) { @@ -215,7 +215,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -232,14 +232,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1i64(,, i64* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1i64(,, i64*, , i64, i64) define @test_vlseg2_nxv1i64(i64* %base, i64 %vl) { @@ -250,7 +250,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -265,14 +265,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i64(i64* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1i64(,,, i64* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i64(,,, i64*, , i64, i64) define @test_vlseg3_nxv1i64(i64* %base, i64 %vl) { @@ -283,7 +283,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,} 
%0, 1 ret %1 } @@ -299,14 +299,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i64( %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1i64(,,,, i64* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i64(,,,, i64*, , i64, i64) define @test_vlseg4_nxv1i64(i64* %base, i64 %vl) { @@ -317,7 +317,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -334,14 +334,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i64(,,,,, i64* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64(,,,,, i64*, , i64, i64) define @test_vlseg5_nxv1i64(i64* %base, i64 %vl) { @@ -352,7 +352,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64( undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -370,14 +370,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64( undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i64(,,,,,, i64* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64(,,,,,, i64*, , i64, i64) define @test_vlseg6_nxv1i64(i64* %base, i64 %vl) { @@ -388,7 +388,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -407,14 +407,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue 
{,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(,,,,,,, i64* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64(,,,,,,, i64*, , i64, i64) define @test_vlseg7_nxv1i64(i64* %base, i64 %vl) { @@ -425,7 +425,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -445,14 +445,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(,,,,,,,, i64* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64(,,,,,,,, i64*, , i64, i64) define @test_vlseg8_nxv1i64(i64* %base, i64 %vl) { @@ -463,7 +463,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -484,14 +484,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1i32(,, i32* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1i32(,, i32*, , i64, i64) define @test_vlseg2_nxv1i32(i32* %base, i64 %vl) { @@ -502,7 +502,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -517,14 +517,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1i32(,,, i32* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i32(,,, i32*, , i64, i64) define @test_vlseg3_nxv1i32(i32* %base, i64 %vl) { @@ -535,7 +535,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -551,14 +551,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1i32(,,,, i32* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i32(,,,, i32*, , i64, i64) define @test_vlseg4_nxv1i32(i32* %base, i64 %vl) { @@ -569,7 +569,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -586,14 +586,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i32(,,,,, i32* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32(,,,,, i32*, , i64, i64) define @test_vlseg5_nxv1i32(i32* %base, i64 %vl) { @@ -604,7 +604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -622,14 +622,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i32(,,,,,, i32* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32(,,,,,, i32*, , i64, i64) define @test_vlseg6_nxv1i32(i32* %base, i64 %vl) { @@ -640,7 +640,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -659,14 +659,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32( undef, undef, 
undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(,,,,,,, i32* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7_nxv1i32(i32* %base, i64 %vl) { @@ -677,7 +677,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -697,14 +697,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(,,,,,,,, i32* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8_nxv1i32(i32* %base, i64 %vl) { @@ -715,7 +715,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -736,14 +736,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8i16(,, i16* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv8i16(,, i16*, , i64, i64) define @test_vlseg2_nxv8i16(i16* %base, i64 %vl) { @@ -754,7 +754,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -769,14 +769,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8i16(i16* , i64) +declare {,,} 
@llvm.riscv.vlseg3.nxv8i16(,,, i16* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv8i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv8i16(i16* %base, i64 %vl) { @@ -787,7 +787,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -803,14 +803,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv8i16(,,,, i16* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv8i16(i16* %base, i64 %vl) { @@ -821,7 +821,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -838,14 +838,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4i8(,, i8*, , i64, i64) define @test_vlseg2_nxv4i8(i8* %base, i64 %vl) { @@ -856,7 +856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -871,14 +871,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4i8(,,, i8* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv4i8(i8* %base, i64 %vl) { @@ -889,7 +889,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -905,14 +905,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) + 
%0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv4i8(,,,, i8* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv4i8(i8* %base, i64 %vl) { @@ -923,7 +923,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -940,14 +940,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv4i8(,,,,, i8* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv4i8(i8* %base, i64 %vl) { @@ -958,7 +958,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -976,14 +976,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i8(,,,,,, i8* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv4i8(i8* %base, i64 %vl) { @@ -994,7 +994,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1013,14 +1013,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(,,,,,,, i8* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv4i8(i8* 
%base, i64 %vl) { @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1051,14 +1051,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(,,,,,,,, i8* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv4i8(i8* %base, i64 %vl) { @@ -1069,7 +1069,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1090,14 +1090,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1i16(,, i16* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1i16(,, i16*, , i64, i64) define @test_vlseg2_nxv1i16(i16* %base, i64 %vl) { @@ -1108,7 +1108,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1123,14 +1123,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1i16(,,, i16* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv1i16(i16* %base, i64 %vl) { @@ -1141,7 +1141,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1157,14 +1157,14 @@ ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1i16(,,,, i16* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv1i16(i16* %base, i64 %vl) { @@ -1175,7 +1175,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1192,14 +1192,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i16(,,,,, i16* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16(,,,,, i16*, , i64, i64) define @test_vlseg5_nxv1i16(i16* %base, i64 %vl) { @@ -1210,7 +1210,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1228,14 +1228,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i16(,,,,,, i16* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv1i16(i16* %base, i64 %vl) { @@ -1246,7 +1246,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1265,14 +1265,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} 
@llvm.riscv.vlseg7.nxv1i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(,,,,,,, i16* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv1i16(i16* %base, i64 %vl) { @@ -1283,7 +1283,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1303,14 +1303,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(,,,,,,,, i16* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv1i16(i16* %base, i64 %vl) { @@ -1321,7 +1321,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1342,14 +1342,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2i32(,, i32* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2i32(,, i32*, , i64, i64) define @test_vlseg2_nxv2i32(i32* %base, i64 %vl) { @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1375,14 +1375,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i32(i32* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2i32(,,, i32* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i32(,,, i32*, , i64, i64) define @test_vlseg3_nxv2i32(i32* %base, i64 %vl) { @@ -1393,7 +1393,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1409,14 +1409,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i32( %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2i32(,,,, i32* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i32(,,,, i32*, , i64, i64) define @test_vlseg4_nxv2i32(i32* %base, i64 %vl) { @@ -1427,7 +1427,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1444,14 +1444,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i32(,,,,, i32* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32(,,,,, i32*, , i64, i64) define @test_vlseg5_nxv2i32(i32* %base, i64 %vl) { @@ -1462,7 +1462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1480,14 +1480,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i32(,,,,,, i32* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32(,,,,,, i32*, , i64, i64) define @test_vlseg6_nxv2i32(i32* %base, i64 %vl) { @@ -1498,7 +1498,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1517,14 +1517,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32( undef, undef, undef, undef, undef, 
undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(,,,,,,, i32* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7_nxv2i32(i32* %base, i64 %vl) { @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1555,14 +1555,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(,,,,,,,, i32* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8_nxv2i32(i32* %base, i64 %vl) { @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1594,14 +1594,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv8i8(,, i8*, , i64, i64) define @test_vlseg2_nxv8i8(i8* %base, i64 %vl) { @@ -1612,7 +1612,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1627,14 +1627,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv8i8(,,, i8* , i64) declare {,,} 
@llvm.riscv.vlseg3.mask.nxv8i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv8i8(i8* %base, i64 %vl) { @@ -1645,7 +1645,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1661,14 +1661,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv8i8(,,,, i8* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv8i8(i8* %base, i64 %vl) { @@ -1679,7 +1679,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1696,14 +1696,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv8i8(,,,,, i8* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv8i8(i8* %base, i64 %vl) { @@ -1714,7 +1714,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1732,14 +1732,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv8i8(,,,,,, i8* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv8i8(i8* %base, i64 %vl) { @@ -1750,7 +1750,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1769,14 +1769,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(,,,,,,, i8* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv8i8(i8* %base, i64 %vl) { @@ -1787,7 +1787,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1807,14 +1807,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(,,,,,,,, i8* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv8i8(i8* %base, i64 %vl) { @@ -1825,7 +1825,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1846,14 +1846,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4i64(,, i64* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4i64(,, i64*, , i64, i64) define @test_vlseg2_nxv4i64(i64* %base, i64 %vl) { @@ -1864,7 +1864,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1879,14 +1879,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue 
{,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4i16(,, i16* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4i16(,, i16*, , i64, i64) define @test_vlseg2_nxv4i16(i16* %base, i64 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1912,14 +1912,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4i16(,,, i16* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv4i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv4i16(i16* %base, i64 %vl) { @@ -1930,7 +1930,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1946,14 +1946,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv4i16(,,,, i16* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv4i16(i16* %base, i64 %vl) { @@ -1964,7 +1964,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1981,14 +1981,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv4i16(,,,,, i16* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16(,,,,, i16*, , i64, i64) define @test_vlseg5_nxv4i16(i16* %base, i64 %vl) { @@ -1999,7 +1999,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2017,14 +2017,14 @@ ; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4i16(,,,,,, i16* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv4i16(i16* %base, i64 %vl) { @@ -2035,7 +2035,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2054,14 +2054,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(,,,,,,, i16* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv4i16(i16* %base, i64 %vl) { @@ -2072,7 +2072,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2092,14 +2092,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(,,,,,,,, i16* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv4i16(i16* %base, i64 %vl) { @@ -2110,7 +2110,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2131,14 +2131,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16( undef, undef , undef , undef, 
undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1i8(,, i8*, , i64, i64) define @test_vlseg2_nxv1i8(i8* %base, i64 %vl) { @@ -2149,7 +2149,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2164,14 +2164,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1i8(,,, i8* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv1i8(i8* %base, i64 %vl) { @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2198,14 +2198,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1i8(,,,, i8* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv1i8(i8* %base, i64 %vl) { @@ -2216,7 +2216,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2233,14 +2233,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1i8(,,,,, i8* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv1i8(i8* %base, i64 %vl) { @@ -2251,7 +2251,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2269,14 +2269,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1i8(,,,,,, i8* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv1i8(i8* %base, i64 %vl) { @@ -2287,7 +2287,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2306,14 +2306,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(,,,,,,, i8* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv1i8(i8* %base, i64 %vl) { @@ -2324,7 +2324,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2344,14 +2344,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(,,,,,,,, i8* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv1i8(i8* %base, i64 %vl) { @@ -2362,7 +2362,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2383,14 +2383,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2i8(,, i8*, , i64, i64) define @test_vlseg2_nxv2i8(i8* %base, i64 %vl) { @@ -2401,7 +2401,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2416,14 +2416,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i8(i8* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2i8(,,, i8* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i8(,,, i8*, , i64, i64) define @test_vlseg3_nxv2i8(i8* %base, i64 %vl) { @@ -2434,7 +2434,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2450,14 +2450,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i8( %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2i8(,,,, i8* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i8(,,,, i8*, , i64, i64) define @test_vlseg4_nxv2i8(i8* %base, i64 %vl) { @@ -2468,7 +2468,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2485,14 +2485,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i8(,,,,, i8* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8(,,,,, i8*, , i64, i64) define @test_vlseg5_nxv2i8(i8* %base, i64 %vl) { @@ -2503,7 +2503,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2521,14 +2521,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i8(,,,,,, i8* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8(,,,,,, i8*, , i64, i64) define @test_vlseg6_nxv2i8(i8* %base, i64 %vl) { @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2558,14 +2558,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(,,,,,,, i8* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7_nxv2i8(i8* %base, i64 %vl) { @@ -2576,7 +2576,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2596,14 +2596,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(,,,,,,,, i8* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8_nxv2i8(i8* %base, i64 %vl) { @@ -2614,7 +2614,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2635,14 +2635,14 @@ ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8i32(i32* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8i32(,, i32* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv8i32(,, i32*, , i64, i64) define @test_vlseg2_nxv8i32(i32* %base, i64 %vl) { @@ -2653,7 +2653,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2668,14 +2668,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8i32( %1, %1, i32* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv32i8(i8* , i64) +declare {,} @llvm.riscv.vlseg2.nxv32i8(,, i8* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv32i8(,, i8*, , i64, i64) define @test_vlseg2_nxv32i8(i8* %base, i64 %vl) { @@ -2686,7 +2686,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2701,14 +2701,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv32i8( %1, %1, i8* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i16(i16* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2i16(,, i16* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2i16(,, i16*, , i64, i64) define @test_vlseg2_nxv2i16(i16* %base, i64 %vl) { @@ -2719,7 +2719,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2734,14 +2734,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i16( %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2i16(i16* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2i16(,,, i16* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i16(,,, i16*, , i64, i64) define @test_vlseg3_nxv2i16(i16* %base, i64 %vl) { @@ -2752,7 +2752,7 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2768,14 +2768,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i16( %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2i16(,,,, i16* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i16(,,,, i16*, , i64, i64) define @test_vlseg4_nxv2i16(i16* %base, i64 %vl) { @@ -2786,7 +2786,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2803,14 +2803,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv2i16(,,,,, i16* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16(,,,,, i16*, , i64, i64) define @test_vlseg5_nxv2i16(i16* %base, i64 %vl) { @@ -2821,7 +2821,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2839,14 +2839,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2i16(,,,,,, i16* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16(,,,,,, i16*, , i64, i64) define @test_vlseg6_nxv2i16(i16* %base, i64 %vl) { @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2876,14 +2876,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) 
+ %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(,,,,,,, i16* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7_nxv2i16(i16* %base, i64 %vl) { @@ -2894,7 +2894,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2914,14 +2914,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(,,,,,,,, i16* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8_nxv2i16(i16* %base, i64 %vl) { @@ -2932,7 +2932,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2953,14 +2953,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2i64(i64* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2i64(,, i64* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2i64(,, i64*, , i64, i64) define @test_vlseg2_nxv2i64(i64* %base, i64 %vl) { @@ -2971,7 +2971,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2986,14 +2986,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2i64( %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } 
-declare {,,} @llvm.riscv.vlseg3.nxv2i64(i64* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2i64(,,, i64* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2i64(,,, i64*, , i64, i64) define @test_vlseg3_nxv2i64(i64* %base, i64 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3020,14 +3020,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2i64( %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2i64(,,,, i64* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2i64(,,,, i64*, , i64, i64) define @test_vlseg4_nxv2i64(i64* %base, i64 %vl) { @@ -3038,7 +3038,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3055,14 +3055,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv16f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.nxv16f16(,, half* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv16f16(,, half*, , i64, i64) define @test_vlseg2_nxv16f16(half* %base, i64 %vl) { @@ -3073,7 +3073,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3088,14 +3088,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4f64(,, double* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4f64(,, double*, , i64, i64) define @test_vlseg2_nxv4f64(double* %base, i64 %vl) { @@ -3106,7 +3106,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3121,14 +3121,14 
@@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1f64(,, double* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1f64(,, double*, , i64, i64) define @test_vlseg2_nxv1f64(double* %base, i64 %vl) { @@ -3139,7 +3139,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3154,14 +3154,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f64(double* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1f64(,,, double* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f64(,,, double*, , i64, i64) define @test_vlseg3_nxv1f64(double* %base, i64 %vl) { @@ -3172,7 +3172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3188,14 +3188,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f64( %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f64(double* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1f64(,,,, double* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f64(,,,, double*, , i64, i64) define @test_vlseg4_nxv1f64(double* %base, i64 %vl) { @@ -3206,7 +3206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3223,14 +3223,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f64(,,,,, double* , i64) declare {,,,,} 
@llvm.riscv.vlseg5.mask.nxv1f64(,,,,, double*, , i64, i64) define @test_vlseg5_nxv1f64(double* %base, i64 %vl) { @@ -3241,7 +3241,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3259,14 +3259,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f64(,,,,,, double* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64(,,,,,, double*, , i64, i64) define @test_vlseg6_nxv1f64(double* %base, i64 %vl) { @@ -3277,7 +3277,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3296,14 +3296,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(,,,,,,, double* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64(,,,,,,, double*, , i64, i64) define @test_vlseg7_nxv1f64(double* %base, i64 %vl) { @@ -3314,7 +3314,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3334,14 +3334,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(,,,,,,,, double* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64(,,,,,,,, double*, , i64, i64) define @test_vlseg8_nxv1f64(double* %base, i64 %vl) { @@ -3352,7 +3352,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3373,14 +3373,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2f32(,, float* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2f32(,, float*, , i64, i64) define @test_vlseg2_nxv2f32(float* %base, i64 %vl) { @@ -3391,7 +3391,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3406,14 +3406,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f32(float* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2f32(,,, float* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2f32(,,, float*, , i64, i64) define @test_vlseg3_nxv2f32(float* %base, i64 %vl) { @@ -3424,7 +3424,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3440,14 +3440,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f32(float* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2f32(,,,, float* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv2f32(float* %base, i64 %vl) { @@ -3458,7 +3458,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3475,14 +3475,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) + %0 = 
tail call {,,,} @llvm.riscv.vlseg4.nxv2f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv2f32(,,,,, float* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32(,,,,, float*, , i64, i64) define @test_vlseg5_nxv2f32(float* %base, i64 %vl) { @@ -3493,7 +3493,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3511,14 +3511,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f32(,,,,,, float* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32(,,,,,, float*, , i64, i64) define @test_vlseg6_nxv2f32(float* %base, i64 %vl) { @@ -3529,7 +3529,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3548,14 +3548,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(,,,,,,, float* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32(,,,,,,, float*, , i64, i64) define @test_vlseg7_nxv2f32(float* %base, i64 %vl) { @@ -3566,7 +3566,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3586,14 +3586,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = 
extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(,,,,,,,, float* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8_nxv2f32(float* %base, i64 %vl) { @@ -3604,7 +3604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3625,14 +3625,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1f16(,, half* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1f16(,, half*, , i64, i64) define @test_vlseg2_nxv1f16(half* %base, i64 %vl) { @@ -3643,7 +3643,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3658,14 +3658,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1f16(,,, half* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f16(,,, half*, , i64, i64) define @test_vlseg3_nxv1f16(half* %base, i64 %vl) { @@ -3676,7 +3676,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3692,14 +3692,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv1f16(,,,, half* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv1f16(half* %base, i64 %vl) { @@ -3710,7 +3710,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, 
i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3727,14 +3727,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f16(,,,,, half* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv1f16(half* %base, i64 %vl) { @@ -3745,7 +3745,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3763,14 +3763,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f16(,,,,,, half* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv1f16(half* %base, i64 %vl) { @@ -3781,7 +3781,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3800,14 +3800,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(,,,,,,, half* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16(,,,,,,, half*, , i64, i64) define @test_vlseg7_nxv1f16(half* %base, i64 %vl) { @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3838,14 +3838,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(,,,,,,,, half* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv1f16(half* %base, i64 %vl) { @@ -3856,7 +3856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3877,14 +3877,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv1f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.nxv1f32(,, float* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv1f32(,, float*, , i64, i64) define @test_vlseg2_nxv1f32(float* %base, i64 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3910,14 +3910,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv1f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv1f32(float* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv1f32(,,, float* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv1f32(,,, float*, , i64, i64) define @test_vlseg3_nxv1f32(float* %base, i64 %vl) { @@ -3928,7 +3928,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3944,14 +3944,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv1f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv1f32(float* , i64) +declare 
{,,,} @llvm.riscv.vlseg4.nxv1f32(,,,, float* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv1f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv1f32(float* %base, i64 %vl) { @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3979,14 +3979,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv1f32(,,,,, float* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32(,,,,, float*, , i64, i64) define @test_vlseg5_nxv1f32(float* %base, i64 %vl) { @@ -3997,7 +3997,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4015,14 +4015,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv1f32(,,,,,, float* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32(,,,,,, float*, , i64, i64) define @test_vlseg6_nxv1f32(float* %base, i64 %vl) { @@ -4033,7 +4033,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4052,14 +4052,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(,,,,,,, float* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32(,,,,,,, float*, , i64, i64) define @test_vlseg7_nxv1f32(float* %base, i64 %vl) { @@ -4070,7 +4070,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4090,14 +4090,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(,,,,,,,, float* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8_nxv1f32(float* %base, i64 %vl) { @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4129,14 +4129,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8f16(,, half* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv8f16(,, half*, , i64, i64) define @test_vlseg2_nxv8f16(half* %base, i64 %vl) { @@ -4147,7 +4147,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4162,14 +4162,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv8f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv8f16(,,, half* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv8f16(,,, half*, , i64, i64) define @test_vlseg3_nxv8f16(half* %base, i64 %vl) { @@ -4180,7 +4180,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4196,14 +4196,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv8f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv8f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv8f16(,,,, half* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv8f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv8f16(half* %base, i64 %vl) { @@ -4214,7 +4214,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4231,14 +4231,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv8f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.nxv8f32(,, float* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv8f32(,, float*, , i64, i64) define @test_vlseg2_nxv8f32(float* %base, i64 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4264,14 +4264,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv8f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f64(double* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2f64(,, double* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2f64(,, double*, , i64, i64) define @test_vlseg2_nxv2f64(double* %base, i64 %vl) { @@ -4282,7 +4282,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4297,14 +4297,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f64( %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f64(double* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2f64(,,, double* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2f64(,,, double*, , i64, i64) define @test_vlseg3_nxv2f64(double* %base, i64 %vl) { 
@@ -4315,7 +4315,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4331,14 +4331,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f64( %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f64(double* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2f64(,,,, double* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f64(,,,, double*, , i64, i64) define @test_vlseg4_nxv2f64(double* %base, i64 %vl) { @@ -4349,7 +4349,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4366,14 +4366,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4f16(,, half* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4f16(,, half*, , i64, i64) define @test_vlseg2_nxv4f16(half* %base, i64 %vl) { @@ -4384,7 +4384,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4399,14 +4399,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4f16(,,, half* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv4f16(,,, half*, , i64, i64) define @test_vlseg3_nxv4f16(half* %base, i64 %vl) { @@ -4417,7 +4417,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4433,14 +4433,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16( undef, 
undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv4f16(,,,, half* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv4f16(half* %base, i64 %vl) { @@ -4451,7 +4451,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4468,14 +4468,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv4f16(,,,,, half* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv4f16(half* %base, i64 %vl) { @@ -4486,7 +4486,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4504,14 +4504,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv4f16(,,,,,, half* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv4f16(half* %base, i64 %vl) { @@ -4522,7 +4522,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4541,14 +4541,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(,,,,,,, half* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16(,,,,,,, 
half*, , i64, i64) define @test_vlseg7_nxv4f16(half* %base, i64 %vl) { @@ -4559,7 +4559,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4579,14 +4579,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(,,,,,,,, half* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv4f16(half* %base, i64 %vl) { @@ -4597,7 +4597,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4618,14 +4618,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv2f16(half* , i64) +declare {,} @llvm.riscv.vlseg2.nxv2f16(,, half* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv2f16(,, half*, , i64, i64) define @test_vlseg2_nxv2f16(half* %base, i64 %vl) { @@ -4636,7 +4636,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4651,14 +4651,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv2f16( %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv2f16(half* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv2f16(,,, half* , i64) declare {,,} @llvm.riscv.vlseg3.mask.nxv2f16(,,, half*, , i64, i64) define @test_vlseg3_nxv2f16(half* %base, i64 %vl) { @@ -4669,7 +4669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, 
half* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4685,14 +4685,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv2f16( %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv2f16(half* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv2f16(,,,, half* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv2f16(,,,, half*, , i64, i64) define @test_vlseg4_nxv2f16(half* %base, i64 %vl) { @@ -4703,7 +4703,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4720,14 +4720,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* , i64) +declare {,,,,} @llvm.riscv.vlseg5.nxv2f16(,,,,, half* , i64) declare {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16(,,,,, half*, , i64, i64) define @test_vlseg5_nxv2f16(half* %base, i64 %vl) { @@ -4738,7 +4738,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4756,14 +4756,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* , i64) +declare {,,,,,} @llvm.riscv.vlseg6.nxv2f16(,,,,,, half* , i64) declare {,,,,,} @llvm.riscv.vlseg6.mask.nxv2f16(,,,,,, half*, , i64, i64) define @test_vlseg6_nxv2f16(half* %base, i64 %vl) { @@ -4774,7 +4774,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4793,14 +4793,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} 
@llvm.riscv.vlseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* , i64) +declare {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(,,,,,,, half* , i64) declare {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16(,,,,,,, half*, , i64, i64) define @test_vlseg7_nxv2f16(half* %base, i64 %vl) { @@ -4811,7 +4811,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4831,14 +4831,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* , i64) +declare {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(,,,,,,,, half* , i64) declare {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8_nxv2f16(half* %base, i64 %vl) { @@ -4849,7 +4849,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4870,14 +4870,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlseg2.nxv4f32(float* , i64) +declare {,} @llvm.riscv.vlseg2.nxv4f32(,, float* , i64) declare {,} @llvm.riscv.vlseg2.mask.nxv4f32(,, float*, , i64, i64) define @test_vlseg2_nxv4f32(float* %base, i64 %vl) { @@ -4888,7 +4888,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4903,14 +4903,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv4f32( %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlseg3.nxv4f32(float* , i64) +declare {,,} @llvm.riscv.vlseg3.nxv4f32(,,, float* , i64) declare {,,} 
@llvm.riscv.vlseg3.mask.nxv4f32(,,, float*, , i64, i64) define @test_vlseg3_nxv4f32(float* %base, i64 %vl) { @@ -4921,7 +4921,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4937,14 +4937,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlseg3.mask.nxv4f32( %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlseg4.nxv4f32(float* , i64) +declare {,,,} @llvm.riscv.vlseg4.nxv4f32(,,,, float* , i64) declare {,,,} @llvm.riscv.vlseg4.mask.nxv4f32(,,,, float*, , i64, i64) define @test_vlseg4_nxv4f32(float* %base, i64 %vl) { @@ -4955,7 +4955,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4972,7 +4972,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32, i32) define void @test_vlseg2ff_dead_value(i16* %base, i32 %vl, i32* %outvl) { @@ -14,7 +14,7 @@ ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 2 store i32 %1, i32* %outvl ret void @@ -45,7 +45,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 ret %1 } @@ -71,7 +71,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) { @@ -15,7 +15,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -40,7 +40,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(,, i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -53,7 +53,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -78,7 +78,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(,,, i8* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -91,7 +91,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -117,7 +117,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(,,,, i8* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -130,7 +130,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -157,7 +157,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(,,,,, i8* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -170,7 +170,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -198,7 +198,7 @@ ret %1 } -declare 
{,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(,,,,,, i8* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -211,7 +211,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -240,7 +240,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(,,,,,,, i8* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -253,7 +253,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -283,7 +283,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(,,,,,,,, i8* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { @@ -296,7 +296,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -327,7 +327,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(,, i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { @@ -340,7 +340,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -365,7 +365,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(,,, i8* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { @@ -378,7 +378,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -404,7 +404,7 @@ ret %1 } -declare {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv16i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(,,,, i8* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { @@ -417,7 +417,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -444,7 +444,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(,, i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -457,7 +457,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -482,7 +482,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(,,, i32* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -495,7 +495,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -521,7 +521,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(,,,, i32* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -534,7 +534,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -561,7 +561,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(,,,,, i32* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i32, i32) define @test_vlseg5ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -574,7 +574,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -602,7 +602,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(,,,,,, i32* , i32) declare {,,,,,, i32} 
@llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i32, i32) define @test_vlseg6ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -615,7 +615,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -644,7 +644,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(,,,,,,, i32* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -657,7 +657,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -687,7 +687,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(,,,,,,,, i32* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { @@ -700,7 +700,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -731,7 +731,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -744,7 +744,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -769,7 +769,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(,,, i16* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -782,7 +782,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -808,7 +808,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(,,,, i16* , i32) declare {,,,, i32} 
@llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -821,7 +821,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -848,7 +848,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(,,,,, i16* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -861,7 +861,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -889,7 +889,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(,,,,,, i16* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -902,7 +902,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -931,7 +931,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(,,,,,,, i16* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -944,7 +944,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -974,7 +974,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(,,,,,,,, i16* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { @@ -987,7 +987,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1018,7 +1018,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32) 
+declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(,, i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1056,7 +1056,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(,,, i32* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1069,7 +1069,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1095,7 +1095,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(,,,, i32* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1108,7 +1108,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1135,7 +1135,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(,,,,, i32* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i32, i32) define @test_vlseg5ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1148,7 +1148,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1176,7 +1176,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(,,,,,, i32* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i32, i32) define @test_vlseg6ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1218,7 +1218,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(,,,,,,, i32* , i32) declare {,,,,,,, i32} 
@llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i32, i32) define @test_vlseg7ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1231,7 +1231,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1261,7 +1261,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(,,,,,,,, i32* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i32, i32) define @test_vlseg8ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1274,7 +1274,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1305,7 +1305,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { @@ -1318,7 +1318,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1343,7 +1343,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(,,, i16* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { @@ -1356,7 +1356,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1382,7 +1382,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(,,,, i16* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { @@ -1395,7 +1395,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1422,7 +1422,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(,, i8* , i32) declare {,, i32} 
@llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1435,7 +1435,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1460,7 +1460,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(,,, i8* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1473,7 +1473,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1499,7 +1499,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(,,,, i8* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1512,7 +1512,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1539,7 +1539,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(,,,,, i8* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1552,7 +1552,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1580,7 +1580,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(,,,,,, i8* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1593,7 +1593,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1622,7 +1622,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(,,,,,,, i8* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1635,7 +1635,7 @@ 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1665,7 +1665,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(,,,,,,,, i8* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1678,7 +1678,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -1709,7 +1709,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(,, i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv8i32(i32* %base, i32 %vl, i32* %outvl) { @@ -1722,7 +1722,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1747,7 +1747,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(,, i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1760,7 +1760,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -1785,7 +1785,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(,,, i8* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1798,7 +1798,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -1824,7 +1824,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(,,,, i8* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1837,7 +1837,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8( undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -1864,7 +1864,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(,,,,, i8* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1877,7 +1877,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -1905,7 +1905,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(,,,,,, i8* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1918,7 +1918,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -1947,7 +1947,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(,,,,,,, i8* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -1960,7 +1960,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -1990,7 +1990,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(,,,,,,,, i8* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2003,7 +2003,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2034,7 +2034,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2047,7 +2047,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2072,7 +2072,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(,,, i16* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2111,7 +2111,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(,,,, i16* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2124,7 +2124,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2151,7 +2151,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(,,,,, i16* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2164,7 +2164,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2192,7 +2192,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(,,,,,, i16* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2205,7 +2205,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2234,7 +2234,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(,,,,,,, i16* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2247,7 +2247,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} 
@llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2277,7 +2277,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(,,,,,,,, i16* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2290,7 +2290,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2321,7 +2321,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2334,7 +2334,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2359,7 +2359,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(,, i8* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i32, i32) define @test_vlseg2ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2372,7 +2372,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i8( undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2397,7 +2397,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(,,, i8* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i32, i32) define @test_vlseg3ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2410,7 +2410,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8( undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2436,7 +2436,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(,,,, i8* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i32, i32) define @test_vlseg4ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2449,7 +2449,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8( undef, undef, 
undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2476,7 +2476,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(,,,,, i8* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i32, i32) define @test_vlseg5ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2489,7 +2489,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2517,7 +2517,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(,,,,,, i8* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i32, i32) define @test_vlseg6ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2530,7 +2530,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2559,7 +2559,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(,,,,,,, i8* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i32, i32) define @test_vlseg7ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2572,7 +2572,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2602,7 +2602,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(,,,,,,,, i8* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i32, i32) define @test_vlseg8ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { @@ -2615,7 +2615,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2646,7 +2646,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i32, i32) define @test_vlseg2ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2659,7 +2659,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,, i32} 
@llvm.riscv.vlseg2ff.nxv2i16( undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2684,7 +2684,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(,,, i16* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i32, i32) define @test_vlseg3ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2697,7 +2697,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16( undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -2723,7 +2723,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(,,,, i16* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i32, i32) define @test_vlseg4ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2736,7 +2736,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16( undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -2763,7 +2763,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(,,,,, i16* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i32, i32) define @test_vlseg5ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2776,7 +2776,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -2804,7 +2804,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(,,,,,, i16* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, i16*, , i32, i32) define @test_vlseg6ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2817,7 +2817,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -2846,7 +2846,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(,,,,,,, i16* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i32, i32) define @test_vlseg7ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2859,7 +2859,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16( undef, undef, 
undef, undef, undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -2889,7 +2889,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(,,,,,,,, i16* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i32, i32) define @test_vlseg8ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { @@ -2902,7 +2902,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -2933,7 +2933,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(,, i32* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i32, i32) define @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { @@ -2946,7 +2946,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -2971,7 +2971,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(,,, i32* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i32, i32) define @test_vlseg3ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { @@ -2984,7 +2984,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32( undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3010,7 +3010,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(,,,, i32* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i32, i32) define @test_vlseg4ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { @@ -3023,7 +3023,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32( undef, undef, undef, undef, i32* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3050,7 +3050,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(,, half* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv16f16(half* %base, i32 %vl, i32* %outvl) { @@ -3063,7 +3063,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16f16( undef, undef, half* %base, i32 %vl) 
%1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3088,7 +3088,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(,, double* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv4f64(double* %base, i32 %vl, i32* %outvl) { @@ -3101,7 +3101,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3126,7 +3126,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(,, double* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3139,7 +3139,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3164,7 +3164,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(,,, double* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i32, i32) define @test_vlseg3ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3177,7 +3177,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3203,7 +3203,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(,,,, double* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i32, i32) define @test_vlseg4ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3216,7 +3216,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3243,7 +3243,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(,,,,, double* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i32, i32) define @test_vlseg5ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3256,7 +3256,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64( undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* 
%outvl @@ -3284,7 +3284,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(,,,,,, double* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i32, i32) define @test_vlseg6ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3297,7 +3297,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3326,7 +3326,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(,,,,,,, double* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i32, i32) define @test_vlseg7ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3339,7 +3339,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3369,7 +3369,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(,,,,,,,, double* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i32, i32) define @test_vlseg8ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { @@ -3382,7 +3382,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3413,7 +3413,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(,, float* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3426,7 +3426,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3451,7 +3451,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(,,, float* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3464,7 +3464,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32( undef, undef, undef, float* 
%base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3490,7 +3490,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(,,,, float* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3503,7 +3503,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3530,7 +3530,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(,,,,, float* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i32, i32) define @test_vlseg5ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3543,7 +3543,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -3571,7 +3571,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(,,,,,, float* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i32, i32) define @test_vlseg6ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3584,7 +3584,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3613,7 +3613,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(,,,,,,, float* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i32, i32) define @test_vlseg7ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3626,7 +3626,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3656,7 +3656,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(,,,,,,,, float* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { @@ -3669,7 +3669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} 
@llvm.riscv.vlseg8ff.nxv2f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3700,7 +3700,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(,, half* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3713,7 +3713,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -3738,7 +3738,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(,,, half* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3751,7 +3751,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -3777,7 +3777,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(,,,, half* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3790,7 +3790,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -3817,7 +3817,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(,,,,, half* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3830,7 +3830,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -3858,7 +3858,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(,,,,,, half* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3871,7 +3871,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i32 
%vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -3900,7 +3900,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(,,,,,,, half* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3913,7 +3913,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -3943,7 +3943,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(,,,,,,,, half* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { @@ -3956,7 +3956,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -3987,7 +3987,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(,, float* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4000,7 +4000,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4025,7 +4025,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(,,, float* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4038,7 +4038,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4064,7 +4064,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(,,,, float* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4077,7 +4077,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} 
@llvm.riscv.vlseg4ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4104,7 +4104,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(,,,,, float* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i32, i32) define @test_vlseg5ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4117,7 +4117,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32( undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4145,7 +4145,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(,,,,,, float* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i32, i32) define @test_vlseg6ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4158,7 +4158,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -4187,7 +4187,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(,,,,,,, float* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i32, i32) define @test_vlseg7ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4200,7 +4200,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -4230,7 +4230,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(,,,,,,,, float* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i32, i32) define @test_vlseg8ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { @@ -4243,7 +4243,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -4274,7 +4274,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(,, half* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv8f16(half* %base, i32 %vl, i32* 
%outvl) { @@ -4287,7 +4287,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4312,7 +4312,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(,,, half* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { @@ -4325,7 +4325,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4351,7 +4351,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(,,,, half* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { @@ -4364,7 +4364,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4391,7 +4391,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(,, float* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv8f32(float* %base, i32 %vl, i32* %outvl) { @@ -4404,7 +4404,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4429,7 +4429,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(,, double* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i32, i32) define @test_vlseg2ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { @@ -4442,7 +4442,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f64( undef, undef, double* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4467,7 +4467,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(,,, double* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i32, i32) define @test_vlseg3ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { @@ -4480,7 +4480,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64( undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4506,7 +4506,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(,,,, double* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i32, i32) define @test_vlseg4ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { @@ -4519,7 +4519,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64( undef, undef, undef, undef, double* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4546,7 +4546,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(,, half* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4559,7 +4559,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4584,7 +4584,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(,,, half* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4597,7 +4597,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4623,7 +4623,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(,,,, half* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4636,7 +4636,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4663,7 +4663,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(,,,,, half* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4676,7 +4676,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,, i32} 
@llvm.riscv.vlseg5ff.nxv4f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4704,7 +4704,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(,,,,,, half* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4717,7 +4717,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -4746,7 +4746,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(,,,,,,, half* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4759,7 +4759,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -4789,7 +4789,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(,,,,,,,, half* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { @@ -4802,7 +4802,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -4833,7 +4833,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(,, half* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i32, i32) define @test_vlseg2ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -4846,7 +4846,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f16( undef, undef, half* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -4871,7 +4871,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(,,, half* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i32, i32) define @test_vlseg3ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -4884,7 +4884,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} 
@llvm.riscv.vlseg3ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16( undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -4910,7 +4910,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(,,,, half* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i32, i32) define @test_vlseg4ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -4923,7 +4923,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16( undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl @@ -4950,7 +4950,7 @@ ret %1 } -declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* , i32) +declare {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(,,,,, half* , i32) declare {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i32, i32) define @test_vlseg5ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -4963,7 +4963,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16( undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,, i32} %0, 1 %2 = extractvalue {,,,,, i32} %0, 5 store i32 %2, i32* %outvl @@ -4991,7 +4991,7 @@ ret %1 } -declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* , i32) +declare {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(,,,,,, half* , i32) declare {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i32, i32) define @test_vlseg6ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -5004,7 +5004,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,, i32} %0, 6 store i32 %2, i32* %outvl @@ -5033,7 +5033,7 @@ ret %1 } -declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* , i32) +declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(,,,,,,, half* , i32) declare {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i32, i32) define @test_vlseg7ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -5046,7 +5046,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,, i32} %0, 7 store i32 %2, i32* %outvl @@ -5076,7 +5076,7 @@ ret %1 } -declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* , i32) +declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(,,,,,,,, half* , i32) declare {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i32, i32) define @test_vlseg8ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { @@ -5089,7 +5089,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i32 %vl) + %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %vl) %1 = extractvalue {,,,,,,,, i32} %0, 1 %2 = extractvalue {,,,,,,,, i32} %0, 8 store i32 %2, i32* %outvl @@ -5120,7 +5120,7 @@ ret %1 } -declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* , i32) +declare {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(,, float* , i32) declare {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i32, i32) define @test_vlseg2ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { @@ -5133,7 +5133,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i32 %vl) + %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f32( undef, undef, float* %base, i32 %vl) %1 = extractvalue {,, i32} %0, 1 %2 = extractvalue {,, i32} %0, 2 store i32 %2, i32* %outvl @@ -5158,7 +5158,7 @@ ret %1 } -declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* , i32) +declare {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(,,, float* , i32) declare {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i32, i32) define @test_vlseg3ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { @@ -5171,7 +5171,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i32 %vl) + %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32( undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,, i32} %0, 1 %2 = extractvalue {,,, i32} %0, 3 store i32 %2, i32* %outvl @@ -5197,7 +5197,7 @@ ret %1 } -declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* , i32) +declare {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(,,,, float* , i32) declare {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i32, i32) define @test_vlseg4ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { @@ -5210,7 +5210,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i32 %vl) + %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32( undef, undef, undef, undef, float* %base, i32 %vl) %1 = extractvalue {,,,, i32} %0, 1 %2 = extractvalue {,,,, i32} %0, 4 store i32 %2, i32* %outvl diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) define void @test_vlseg2ff_dead_value(i16* %base, i64 %vl, i64* %outvl) { @@ -14,7 +14,7 @@ ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 2 store i64 %1, i64* %outvl ret void @@ -45,7 +45,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 ret %1 } @@ -71,7 +71,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: - tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) + tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) { @@ -15,7 +15,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -40,7 +40,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(,, i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { @@ -53,7 +53,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -78,7 +78,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(,,, i32* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { @@ -91,7 +91,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -117,7 +117,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(,,,, i32* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { @@ -130,7 +130,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -157,7 +157,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64) +declare {,, i64} 
@llvm.riscv.vlseg2ff.nxv16i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { @@ -170,7 +170,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -195,7 +195,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(,,, i8* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { @@ -208,7 +208,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -234,7 +234,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(,,,, i8* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { @@ -247,7 +247,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -274,7 +274,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(,, i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -287,7 +287,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -312,7 +312,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(,,, i64* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(,,, i64*, , i64, i64) define @test_vlseg3ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -325,7 +325,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -351,7 +351,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(,,,, i64* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(,,,, i64*, , i64, i64) define @test_vlseg4ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -364,7 +364,7 @@ ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -391,7 +391,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(,,,,, i64* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(,,,,, i64*, , i64, i64) define @test_vlseg5ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -404,7 +404,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64( undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -432,7 +432,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(,,,,,, i64* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(,,,,,, i64*, , i64, i64) define @test_vlseg6ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -445,7 +445,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -474,7 +474,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(,,,,,,, i64* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(,,,,,,, i64*, , i64, i64) define @test_vlseg7ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -487,7 +487,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -517,7 +517,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(,,,,,,,, i64* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(,,,,,,,, i64*, , i64, i64) define @test_vlseg8ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { @@ -530,7 +530,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -561,7 +561,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(,, i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(,, i32*, , i64, i64) define 
@test_vlseg2ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -574,7 +574,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -599,7 +599,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(,,, i32* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -612,7 +612,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -638,7 +638,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(,,,, i32* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -651,7 +651,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -678,7 +678,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(,,,,, i32* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(,,,,, i32*, , i64, i64) define @test_vlseg5ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -691,7 +691,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -719,7 +719,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(,,,,,, i32* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(,,,,,, i32*, , i64, i64) define @test_vlseg6ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -732,7 +732,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -761,7 +761,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(,,,,,,, i32* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -774,7 +774,7 @@ ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -804,7 +804,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(,,,,,,,, i32* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { @@ -817,7 +817,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -848,7 +848,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { @@ -861,7 +861,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -886,7 +886,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(,,, i16* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { @@ -899,7 +899,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -925,7 +925,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(,,,, i16* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { @@ -938,7 +938,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -965,7 +965,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -978,7 +978,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1003,7 +1003,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(,,, i8* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1016,7 +1016,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1042,7 +1042,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(,,,, i8* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1055,7 +1055,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1082,7 +1082,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(,,,,, i8* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1095,7 +1095,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1123,7 +1123,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(,,,,,, i8* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1136,7 +1136,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1165,7 +1165,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(,,,,,,, i8* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1178,7 +1178,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call 
{,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1208,7 +1208,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(,,,,,,,, i8* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1221,7 +1221,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1252,7 +1252,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1265,7 +1265,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1290,7 +1290,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(,,, i16* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1303,7 +1303,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1329,7 +1329,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(,,,, i16* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1342,7 +1342,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1369,7 +1369,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(,,,,, i16* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1382,7 +1382,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16( undef, 
undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1410,7 +1410,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(,,,,,, i16* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(,,,,,, i16*, , i64, i64) define @test_vlseg6ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1423,7 +1423,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1452,7 +1452,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(,,,,,,, i16* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1465,7 +1465,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1495,7 +1495,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(,,,,,,,, i16* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { @@ -1508,7 +1508,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1539,7 +1539,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(,, i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1552,7 +1552,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1577,7 +1577,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(,,, i32* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(,,, i32*, , i64, i64) define @test_vlseg3ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1590,7 +1590,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,, i64} 
@llvm.riscv.vlseg3ff.nxv2i32( undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1616,7 +1616,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(,,,, i32* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(,,,, i32*, , i64, i64) define @test_vlseg4ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1629,7 +1629,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32( undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1656,7 +1656,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(,,,,, i32* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(,,,,, i32*, , i64, i64) define @test_vlseg5ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1669,7 +1669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1697,7 +1697,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(,,,,,, i32* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(,,,,,, i32*, , i64, i64) define @test_vlseg6ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1710,7 +1710,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -1739,7 +1739,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(,,,,,,, i32* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(,,,,,,, i32*, , i64, i64) define @test_vlseg7ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1752,7 +1752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -1782,7 +1782,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(,,,,,,,, i32* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(,,,,,,,, i32*, , i64, i64) define @test_vlseg8ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { @@ -1795,7 +1795,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} 
@llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -1826,7 +1826,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1839,7 +1839,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -1864,7 +1864,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(,,, i8* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1877,7 +1877,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -1903,7 +1903,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(,,,, i8* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1916,7 +1916,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -1943,7 +1943,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(,,,,, i8* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1956,7 +1956,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -1984,7 +1984,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(,,,,,, i8* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -1997,7 +1997,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8( undef, undef, undef, undef, 
undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2026,7 +2026,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(,,,,,,, i8* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2039,7 +2039,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2069,7 +2069,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(,,,,,,,, i8* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2082,7 +2082,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -2113,7 +2113,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(,, i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) { @@ -2126,7 +2126,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2151,7 +2151,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2164,7 +2164,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2189,7 +2189,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(,,, i16* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2202,7 +2202,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 
%2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2228,7 +2228,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(,,,, i16* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2241,7 +2241,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2268,7 +2268,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(,,,,, i16* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2281,7 +2281,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2309,7 +2309,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(,,,,,, i16* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(,,,,,, i16*, , i64, i64) define @test_vlseg6ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2322,7 +2322,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2351,7 +2351,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(,,,,,,, i16* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2364,7 +2364,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2394,7 +2394,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(,,,,,,,, i16* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { @@ -2407,7 +2407,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16( undef, undef , undef , 
undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -2438,7 +2438,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2451,7 +2451,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2476,7 +2476,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(,,, i8* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2489,7 +2489,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2515,7 +2515,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(,,,, i8* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2528,7 +2528,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2555,7 +2555,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(,,,,, i8* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2568,7 +2568,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2596,7 +2596,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(,,,,,, i8* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2609,7 +2609,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ 
-2638,7 +2638,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(,,,,,,, i8* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2651,7 +2651,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2681,7 +2681,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(,,,,,,,, i8* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2694,7 +2694,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -2725,7 +2725,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2738,7 +2738,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -2763,7 +2763,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(,,, i8* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(,,, i8*, , i64, i64) define @test_vlseg3ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2776,7 +2776,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8( undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -2802,7 +2802,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(,,,, i8* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(,,,, i8*, , i64, i64) define @test_vlseg4ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2815,7 +2815,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8( undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -2842,7 +2842,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* 
, i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(,,,,, i8* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(,,,,, i8*, , i64, i64) define @test_vlseg5ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2855,7 +2855,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -2883,7 +2883,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(,,,,,, i8* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(,,,,,, i8*, , i64, i64) define @test_vlseg6ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2896,7 +2896,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -2925,7 +2925,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(,,,,,,, i8* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(,,,,,,, i8*, , i64, i64) define @test_vlseg7ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2938,7 +2938,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -2968,7 +2968,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(,,,,,,,, i8* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(,,,,,,,, i8*, , i64, i64) define @test_vlseg8ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { @@ -2981,7 +2981,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3012,7 +3012,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(,, i32* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(,, i32*, , i64, i64) define @test_vlseg2ff_nxv8i32(i32* %base, i64 %vl, i64* %outvl) { @@ -3025,7 +3025,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i32( undef, undef, i32* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3050,7 +3050,7 @@ ret %1 } -declare {,, i64} 
@llvm.riscv.vlseg2ff.nxv32i8(i8* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(,, i8* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(,, i8*, , i64, i64) define @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) { @@ -3063,7 +3063,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8( undef, undef, i8* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3088,7 +3088,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(,, i16* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(,, i16*, , i64, i64) define @test_vlseg2ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3101,7 +3101,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i16( undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3126,7 +3126,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(,,, i16* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(,,, i16*, , i64, i64) define @test_vlseg3ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3139,7 +3139,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16( undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3165,7 +3165,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(,,,, i16* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(,,,, i16*, , i64, i64) define @test_vlseg4ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3178,7 +3178,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16( undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3205,7 +3205,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(,,,,, i16* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(,,,,, i16*, , i64, i64) define @test_vlseg5ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3218,7 +3218,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -3246,7 +3246,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(,,,,,, i16* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(,,,,,, 
i16*, , i64, i64) define @test_vlseg6ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3259,7 +3259,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -3288,7 +3288,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(,,,,,,, i16* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(,,,,,,, i16*, , i64, i64) define @test_vlseg7ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3301,7 +3301,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -3331,7 +3331,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(,,,,,,,, i16* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(,,,,,,,, i16*, , i64, i64) define @test_vlseg8ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { @@ -3344,7 +3344,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3375,7 +3375,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(,, i64* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(,, i64*, , i64, i64) define @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { @@ -3388,7 +3388,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64( undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3413,7 +3413,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(,,, i64* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(,,, i64*, , i64, i64) define @test_vlseg3ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { @@ -3426,7 +3426,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64( undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3452,7 +3452,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(,,,, i64* , i64) declare {,,,, i64} 
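For reference, the shape of the updated fault-only-first declarations and calls, written out with the explicit scalable-vector element types, looks like the following. This is a sketch, not a verbatim excerpt: the element type is inferred from the intrinsic name mangling (nxv1f64 implies <vscale x 1 x double>), and the function name @sketch_vlseg2ff_nxv1f64 is illustrative only.

; Updated intrinsic: one passthru operand per segment field, then pointer and VL.
declare {<vscale x 1 x double>, <vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, double*, i64)

define <vscale x 1 x double> @sketch_vlseg2ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) {
entry:
  ; The tests pass undef for each passthru; field 1 is the second loaded segment,
  ; and the trailing i64 field is the new VL written back through %outvl.
  %0 = tail call {<vscale x 1 x double>, <vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, double* %base, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>, <vscale x 1 x double>, i64} %0, 1
  %2 = extractvalue {<vscale x 1 x double>, <vscale x 1 x double>, i64} %0, 2
  store i64 %2, i64* %outvl
  ret <vscale x 1 x double> %1
}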
@llvm.riscv.vlseg4ff.mask.nxv2i64(,,,, i64*, , i64, i64) define @test_vlseg4ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { @@ -3465,7 +3465,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64( undef, undef, undef, undef, i64* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3492,7 +3492,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(,, half* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv16f16(half* %base, i64 %vl, i64* %outvl) { @@ -3505,7 +3505,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3530,7 +3530,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(,, double* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv4f64(double* %base, i64 %vl, i64* %outvl) { @@ -3543,7 +3543,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3568,7 +3568,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(,, double* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3581,7 +3581,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3606,7 +3606,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(,,, double* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(,,, double*, , i64, i64) define @test_vlseg3ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3619,7 +3619,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3645,7 +3645,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(,,,, double* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(,,,, double*, , i64, i64) define @test_vlseg4ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3658,7 
+3658,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3685,7 +3685,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(,,,,, double* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(,,,,, double*, , i64, i64) define @test_vlseg5ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3698,7 +3698,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64( undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -3726,7 +3726,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(,,,,,, double* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(,,,,,, double*, , i64, i64) define @test_vlseg6ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3739,7 +3739,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -3768,7 +3768,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(,,,,,,, double* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(,,,,,,, double*, , i64, i64) define @test_vlseg7ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3781,7 +3781,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -3811,7 +3811,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(,,,,,,,, double* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(,,,,,,,, double*, , i64, i64) define @test_vlseg8ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { @@ -3824,7 +3824,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -3855,7 +3855,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* , i64) +declare {,, i64} 
@llvm.riscv.vlseg2ff.nxv2f32(,, float* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -3868,7 +3868,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -3893,7 +3893,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(,,, float* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -3906,7 +3906,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -3932,7 +3932,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(,,,, float* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -3945,7 +3945,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -3972,7 +3972,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(,,,,, float* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(,,,,, float*, , i64, i64) define @test_vlseg5ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -3985,7 +3985,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4013,7 +4013,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(,,,,,, float* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(,,,,,, float*, , i64, i64) define @test_vlseg6ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -4026,7 +4026,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4055,7 +4055,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* , i64) +declare {,,,,,,, i64} 
@llvm.riscv.vlseg7ff.nxv2f32(,,,,,,, float* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(,,,,,,, float*, , i64, i64) define @test_vlseg7ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -4068,7 +4068,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4098,7 +4098,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(,,,,,,,, float* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { @@ -4111,7 +4111,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4142,7 +4142,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(,, half* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4155,7 +4155,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4180,7 +4180,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(,,, half* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4193,7 +4193,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4219,7 +4219,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(,,,, half* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4232,7 +4232,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4259,7 +4259,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* , i64) 
+declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(,,,,, half* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4272,7 +4272,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4300,7 +4300,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(,,,,,, half* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4313,7 +4313,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4342,7 +4342,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(,,,,,,, half* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4355,7 +4355,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4385,7 +4385,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(,,,,,,,, half* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { @@ -4398,7 +4398,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4429,7 +4429,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(,, float* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4442,7 +4442,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* 
%outvl @@ -4467,7 +4467,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(,,, float* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4480,7 +4480,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4506,7 +4506,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(,,,, float* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4519,7 +4519,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4546,7 +4546,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(,,,,, float* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(,,,,, float*, , i64, i64) define @test_vlseg5ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4559,7 +4559,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32( undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -4587,7 +4587,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(,,,,,, float* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(,,,,,, float*, , i64, i64) define @test_vlseg6ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4600,7 +4600,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -4629,7 +4629,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(,,,,,,, float* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(,,,,,,, float*, , i64, i64) define @test_vlseg7ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4642,7 +4642,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 
%2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -4672,7 +4672,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(,,,,,,,, float* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(,,,,,,,, float*, , i64, i64) define @test_vlseg8ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { @@ -4685,7 +4685,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -4716,7 +4716,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(,, half* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { @@ -4729,7 +4729,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4754,7 +4754,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(,,, half* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { @@ -4767,7 +4767,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4793,7 +4793,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(,,,, half* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { @@ -4806,7 +4806,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4833,7 +4833,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(,, float* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv8f32(float* %base, i64 %vl, i64* %outvl) { @@ -4846,7 +4846,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 
store i64 %2, i64* %outvl @@ -4871,7 +4871,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(,, double* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(,, double*, , i64, i64) define @test_vlseg2ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { @@ -4884,7 +4884,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f64( undef, undef, double* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -4909,7 +4909,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(,,, double* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(,,, double*, , i64, i64) define @test_vlseg3ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { @@ -4922,7 +4922,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64( undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -4948,7 +4948,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(,,,, double* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(,,,, double*, , i64, i64) define @test_vlseg4ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { @@ -4961,7 +4961,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64( undef, undef, undef, undef, double* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -4988,7 +4988,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(,, half* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5001,7 +5001,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5026,7 +5026,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(,,, half* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5039,7 +5039,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5065,7 +5065,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* , i64) 
+declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(,,,, half* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5078,7 +5078,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -5105,7 +5105,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(,,,,, half* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5118,7 +5118,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -5146,7 +5146,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(,,,,,, half* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5159,7 +5159,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -5188,7 +5188,7 @@ ret %1 } -declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(,,,,,,, half* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5201,7 +5201,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -5231,7 +5231,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(,,,,,,,, half* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { @@ -5244,7 +5244,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, 
i64} %0, 8 store i64 %2, i64* %outvl @@ -5275,7 +5275,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(,, half* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(,, half*, , i64, i64) define @test_vlseg2ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5288,7 +5288,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f16( undef, undef, half* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5313,7 +5313,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(,,, half* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(,,, half*, , i64, i64) define @test_vlseg3ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5326,7 +5326,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16( undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5352,7 +5352,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(,,,, half* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(,,,, half*, , i64, i64) define @test_vlseg4ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5365,7 +5365,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16( undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 4 store i64 %2, i64* %outvl @@ -5392,7 +5392,7 @@ ret %1 } -declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* , i64) +declare {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(,,,,, half* , i64) declare {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(,,,,, half*, , i64, i64) define @test_vlseg5ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5405,7 +5405,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16( undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,, i64} %0, 1 %2 = extractvalue {,,,,, i64} %0, 5 store i64 %2, i64* %outvl @@ -5433,7 +5433,7 @@ ret %1 } -declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* , i64) +declare {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(,,,,,, half* , i64) declare {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(,,,,,, half*, , i64, i64) define @test_vlseg6ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5446,7 +5446,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,, i64} %0, 6 store i64 %2, i64* %outvl @@ -5475,7 +5475,7 @@ ret %1 } 
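Spelled out with explicit types, the mechanical change each of these declaration hunks makes is the following. This is a sketch of the nxv2f16 variant, with the element type <vscale x 2 x half> inferred from the intrinsic name mangling; it is not copied verbatim from the surrounding hunks.

; Before: the unmasked fault-only-first segment load took only the pointer and VL.
declare {<vscale x 2 x half>, <vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.nxv2f16(half*, i64)
; After: one passthru operand per segment field precedes the pointer, mirroring the
; masked variant's operand layout; the tests supply undef for every passthru.
declare {<vscale x 2 x half>, <vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, half*, i64)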
-declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* , i64) +declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(,,,,,,, half* , i64) declare {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(,,,,,,, half*, , i64, i64) define @test_vlseg7ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5488,7 +5488,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,, i64} %0, 7 store i64 %2, i64* %outvl @@ -5518,7 +5518,7 @@ ret %1 } -declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* , i64) +declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(,,,,,,,, half* , i64) declare {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(,,,,,,,, half*, , i64, i64) define @test_vlseg8ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { @@ -5531,7 +5531,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i64 %vl) + %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %vl) %1 = extractvalue {,,,,,,,, i64} %0, 1 %2 = extractvalue {,,,,,,,, i64} %0, 8 store i64 %2, i64* %outvl @@ -5562,7 +5562,7 @@ ret %1 } -declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* , i64) +declare {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(,, float* , i64) declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(,, float*, , i64, i64) define @test_vlseg2ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { @@ -5575,7 +5575,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f32( undef, undef, float* %base, i64 %vl) %1 = extractvalue {,, i64} %0, 1 %2 = extractvalue {,, i64} %0, 2 store i64 %2, i64* %outvl @@ -5600,7 +5600,7 @@ ret %1 } -declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* , i64) +declare {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(,,, float* , i64) declare {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(,,, float*, , i64, i64) define @test_vlseg3ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { @@ -5613,7 +5613,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32( undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,, i64} %0, 1 %2 = extractvalue {,,, i64} %0, 3 store i64 %2, i64* %outvl @@ -5639,7 +5639,7 @@ ret %1 } -declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* , i64) +declare {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(,,,, float* , i64) declare {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(,,,, float*, , i64, i64) define @test_vlseg4ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { @@ -5652,7 +5652,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i64 %vl) + %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32( undef, undef, undef, undef, float* %base, i64 %vl) %1 = extractvalue {,,,, i64} %0, 1 %2 = extractvalue {,,,, i64} %0, 
4 store i64 %2, i64* %outvl diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv16i16(,, i16*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv16i16(i16* %base, i32 %offset, i32 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -28,14 +28,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv1i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -46,7 +46,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -61,14 +61,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1i8(,,, i8*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -79,7 +79,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -95,14 +95,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8*, i32, i32) +declare {,,,} 
@llvm.riscv.vlsseg4.nxv1i8(,,,, i8*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -113,7 +113,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -130,14 +130,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(,,,,, i8*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -148,7 +148,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -166,14 +166,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(,,,,,, i8*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -184,7 +184,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -203,14 +203,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(,,,,,,, i8*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv1i8(i8* %base, i32 
%offset, i32 %vl) { @@ -221,7 +221,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -241,14 +241,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(,,,,,,,, i8*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv1i8(i8* %base, i32 %offset, i32 %vl) { @@ -259,7 +259,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -280,14 +280,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv16i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv16i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv16i8(i8* %base, i32 %offset, i32 %vl) { @@ -298,7 +298,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -313,14 +313,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv16i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv16i8(,,, i8*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv16i8(i8* %base, i32 %offset, i32 %vl) { @@ -331,7 +331,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 
killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -347,14 +347,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(,,,, i8*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv16i8(i8* %base, i32 %offset, i32 %vl) { @@ -365,7 +365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -382,14 +382,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i32(i32*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv2i32(,, i32*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -400,7 +400,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -415,14 +415,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i32(i32*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2i32(,,, i32*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -433,7 +433,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -449,14 +449,14 @@ ; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(,,,, i32*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -467,7 +467,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -484,14 +484,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(,,,,, i32*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i32, , i32, i32) define @test_vlsseg5_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -502,7 +502,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -520,14 +520,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(,,,,,, i32*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i32, , i32, i32) define @test_vlsseg6_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -538,7 +538,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -557,14 +557,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(,,,,,,, i32*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg7_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -575,7 +575,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -595,14 +595,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(,,,,,,,, i32*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg8_nxv2i32(i32* %base, i32 %offset, i32 %vl) { @@ -613,7 +613,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -634,14 +634,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4i16(,, i16*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -652,7 +652,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -667,14 +667,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 
= tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv4i16(,,, i16*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -685,7 +685,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -701,14 +701,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(,,,, i16*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -719,7 +719,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -736,14 +736,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(,,,,, i16*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -754,7 +754,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -772,14 +772,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = 
extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(,,,,,, i16*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -790,7 +790,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -809,14 +809,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(,,,,,,, i16*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -827,7 +827,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -847,14 +847,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(,,,,,,,, i16*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv4i16(i16* %base, i32 %offset, i32 %vl) { @@ -865,7 +865,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -886,14 +886,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv1i32(,, i32*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -904,7 +904,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -919,14 +919,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1i32(,,, i32*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -937,7 +937,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -953,14 +953,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(,,,, i32*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -971,7 +971,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -988,14 +988,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(,,,,, i32*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i32, , i32, 
i32) define @test_vlsseg5_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -1006,7 +1006,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1024,14 +1024,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(,,,,,, i32*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i32, , i32, i32) define @test_vlsseg6_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -1042,7 +1042,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1061,14 +1061,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(,,,,,,, i32*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i32, , i32, i32) define @test_vlsseg7_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -1079,7 +1079,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1099,14 +1099,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(,,,,,,,, i32*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, 
i32*, i32, , i32, i32) define @test_vlsseg8_nxv1i32(i32* %base, i32 %offset, i32 %vl) { @@ -1117,7 +1117,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1138,14 +1138,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv8i16(,, i16*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv8i16(i16* %base, i32 %offset, i32 %vl) { @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1171,14 +1171,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv8i16(,,, i16*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv8i16(i16* %base, i32 %offset, i32 %vl) { @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1205,14 +1205,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(,,,, i16*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv8i16(i16* %base, i32 %offset, i32 %vl) { @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed 
$v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1240,14 +1240,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv8i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1258,7 +1258,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1273,14 +1273,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv8i8(,,, i8*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1291,7 +1291,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1307,14 +1307,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(,,,, i8*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1325,7 +1325,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1342,14 +1342,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(,,,,, i8*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1378,14 +1378,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(,,,,,, i8*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1396,7 +1396,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1415,14 +1415,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(,,,,,,, i8*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1433,7 +1433,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1453,14 +1453,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 
%offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(,,,,,,,, i8*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv8i8(i8* %base, i32 %offset, i32 %vl) { @@ -1471,7 +1471,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1492,14 +1492,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i32(i32*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv8i32(,, i32*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv8i32(i32* %base, i32 %offset, i32 %vl) { @@ -1510,7 +1510,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1525,14 +1525,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1543,7 +1543,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1558,14 +1558,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv4i8(,,, i8*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1576,7 +1576,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1592,14 +1592,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(,,,, i8*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1610,7 +1610,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1627,14 +1627,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(,,,,, i8*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1645,7 +1645,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1663,14 +1663,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(,,,,,, i8*, i32, i32) declare {,,,,,} 
@llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1681,7 +1681,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1700,14 +1700,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(,,,,,,, i8*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1718,7 +1718,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1738,14 +1738,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(,,,,,,,, i8*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv4i8(i8* %base, i32 %offset, i32 %vl) { @@ -1756,7 +1756,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1777,14 +1777,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i16(i16*, i32, i32) +declare 
{,} @llvm.riscv.vlsseg2.nxv1i16(,, i16*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1795,7 +1795,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1810,14 +1810,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1i16(,,, i16*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1828,7 +1828,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1844,14 +1844,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(,,,, i16*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1862,7 +1862,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1879,14 +1879,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(,,,,, i16*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1915,14 +1915,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(,,,,,, i16*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1933,7 +1933,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1952,14 +1952,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(,,,,,,, i16*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -1970,7 +1970,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1990,14 +1990,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(,,,,,,,, i16*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv1i16(i16* %base, i32 %offset, i32 %vl) { @@ -2008,7 +2008,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2029,14 +2029,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv32i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv32i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv32i8(i8* %base, i32 %offset, i32 %vl) { @@ -2047,7 +2047,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2062,14 +2062,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i8(i8*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv2i8(,, i8*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i32, , i32, i32) define @test_vlsseg2_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2080,7 +2080,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2095,14 +2095,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i8(i8*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2i8(,,, i8*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i32, , i32, i32) define @test_vlsseg3_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2113,7 +2113,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2129,14 +2129,14 @@ ; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(,,,, i8*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i32, , i32, i32) define @test_vlsseg4_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2164,14 +2164,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(,,,,, i8*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i32, , i32, i32) define @test_vlsseg5_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2200,14 +2200,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(,,,,,, i8*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i32, , i32, i32) define @test_vlsseg6_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2218,7 +2218,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2237,14 +2237,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, 
undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(,,,,,,, i8*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg7_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2255,7 +2255,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2275,14 +2275,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(,,,,,,,, i8*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i32, , i32, i32) define @test_vlsseg8_nxv2i8(i8* %base, i32 %offset, i32 %vl) { @@ -2293,7 +2293,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2314,14 +2314,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i16(i16*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv2i16(,, i16*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i32, , i32, i32) define @test_vlsseg2_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2332,7 +2332,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2347,14 +2347,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, i16* %base, i32 
%offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i16(i16*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2i16(,,, i16*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i32, , i32, i32) define @test_vlsseg3_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2365,7 +2365,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2381,14 +2381,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(,,,, i16*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i32, , i32, i32) define @test_vlsseg4_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2399,7 +2399,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2416,14 +2416,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(,,,,, i16*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i32, , i32, i32) define @test_vlsseg5_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2434,7 +2434,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2452,14 +2452,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} 
@llvm.riscv.vlsseg6.nxv2i16(i16*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(,,,,,, i16*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i32, , i32, i32) define @test_vlsseg6_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2470,7 +2470,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2489,14 +2489,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(,,,,,,, i16*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg7_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2507,7 +2507,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2527,14 +2527,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(,,,,,,,, i16*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i32, , i32, i32) define @test_vlsseg8_nxv2i16(i16* %base, i32 %offset, i32 %vl) { @@ -2545,7 +2545,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2566,14 +2566,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, 
%1, %1, %1, %1, %1, i16* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i32(i32*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4i32(,, i32*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i32, , i32, i32) define @test_vlsseg2_nxv4i32(i32* %base, i32 %offset, i32 %vl) { @@ -2584,7 +2584,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2599,14 +2599,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i32(i32*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv4i32(,,, i32*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i32, , i32, i32) define @test_vlsseg3_nxv4i32(i32* %base, i32 %offset, i32 %vl) { @@ -2617,7 +2617,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2633,14 +2633,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(,,,, i32*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i32, , i32, i32) define @test_vlsseg4_nxv4i32(i32* %base, i32 %offset, i32 %vl) { @@ -2651,7 +2651,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2668,14 +2668,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv16f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv16f16(,, half*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, 
half*, i32, , i32, i32) define @test_vlsseg2_nxv16f16(half* %base, i32 %offset, i32 %vl) { @@ -2686,7 +2686,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2701,14 +2701,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f64(double*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4f64(,, double*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv4f64(double* %base, i32 %offset, i32 %vl) { @@ -2719,7 +2719,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2734,14 +2734,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f64(double*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv1f64(,, double*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2752,7 +2752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2767,14 +2767,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f64(double*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1f64(,,, double*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i32, , i32, i32) define @test_vlsseg3_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2785,7 +2785,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, 
undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2801,14 +2801,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(double*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(,,,, double*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i32, , i32, i32) define @test_vlsseg4_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2819,7 +2819,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2836,14 +2836,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(,,,,, double*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i32, , i32, i32) define @test_vlsseg5_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2854,7 +2854,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2872,14 +2872,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(,,,,,, double*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i32, , i32, i32) define @test_vlsseg6_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2890,7 +2890,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ 
-2909,14 +2909,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(,,,,,,, double*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i32, , i32, i32) define @test_vlsseg7_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2927,7 +2927,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2947,14 +2947,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(,,,,,,,, double*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i32, , i32, i32) define @test_vlsseg8_nxv1f64(double* %base, i32 %offset, i32 %vl) { @@ -2965,7 +2965,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2986,14 +2986,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv2f32(,, float*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail 
call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3019,14 +3019,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2f32(,,, float*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3037,7 +3037,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3053,14 +3053,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(,,,, float*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3071,7 +3071,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3088,14 +3088,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(,,,,, float*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i32, , i32, i32) define @test_vlsseg5_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3106,7 +3106,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3124,14 +3124,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret 
entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(,,,,,, float*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i32, , i32, i32) define @test_vlsseg6_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3142,7 +3142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3161,14 +3161,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(,,,,,,, float*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i32, , i32, i32) define @test_vlsseg7_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3179,7 +3179,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3199,14 +3199,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(,,,,,,,, float*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i32, , i32, i32) define @test_vlsseg8_nxv2f32(float* %base, i32 %offset, i32 %vl) { @@ -3217,7 +3217,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 
ret %1 } @@ -3238,14 +3238,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv1f16(,, half*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3256,7 +3256,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3271,14 +3271,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1f16(,,, half*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3289,7 +3289,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3305,14 +3305,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(,,,, half*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3323,7 +3323,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3340,14 +3340,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(,,,,, half*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3376,14 +3376,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(,,,,,, half*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3394,7 +3394,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3413,14 +3413,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(,,,,,,, half*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3431,7 +3431,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3451,14 +3451,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(,,,,,,,, half*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv1f16(half* %base, i32 %offset, i32 %vl) { @@ -3469,7 +3469,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3490,14 +3490,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv1f32(,, float*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3508,7 +3508,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3523,14 +3523,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv1f32(,,, float*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3541,7 +3541,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3557,14 +3557,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, float* %base, i32 %offset, i32 
%vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(,,,, float*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3575,7 +3575,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3592,14 +3592,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(,,,,, float*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i32, , i32, i32) define @test_vlsseg5_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3610,7 +3610,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3628,14 +3628,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(,,,,,, float*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i32, , i32, i32) define @test_vlsseg6_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3646,7 +3646,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3665,14 +3665,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} 
@llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(,,,,,,, float*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i32, , i32, i32) define @test_vlsseg7_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3683,7 +3683,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3703,14 +3703,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(,,,,,,,, float*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i32, , i32, i32) define @test_vlsseg8_nxv1f32(float* %base, i32 %offset, i32 %vl) { @@ -3721,7 +3721,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3742,14 +3742,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv8f16(,, half*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv8f16(half* %base, i32 %offset, i32 %vl) { @@ -3760,7 +3760,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3775,14 +3775,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, half* %base, i32 %offset, 
i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv8f16(,,, half*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv8f16(half* %base, i32 %offset, i32 %vl) { @@ -3793,7 +3793,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3809,14 +3809,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(,,,, half*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv8f16(half* %base, i32 %offset, i32 %vl) { @@ -3827,7 +3827,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3844,14 +3844,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv8f32(,, float*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv8f32(float* %base, i32 %offset, i32 %vl) { @@ -3862,7 +3862,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3877,14 +3877,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f64(double*, i32, i32) 
+declare {,} @llvm.riscv.vlsseg2.nxv2f64(,, double*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i32, , i32, i32) define @test_vlsseg2_nxv2f64(double* %base, i32 %offset, i32 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3910,14 +3910,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2f64(,,, double*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i32, , i32, i32) define @test_vlsseg3_nxv2f64(double* %base, i32 %offset, i32 %vl) { @@ -3928,7 +3928,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3944,14 +3944,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(double*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(,,,, double*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i32, , i32, i32) define @test_vlsseg4_nxv2f64(double* %base, i32 %offset, i32 %vl) { @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3979,14 +3979,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, double* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4f16(,, half*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ 
-3997,7 +3997,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4012,14 +4012,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv4f16(,,, half*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4030,7 +4030,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4046,14 +4046,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(,,,, half*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4064,7 +4064,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4081,14 +4081,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(,,,,, half*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4099,7 +4099,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i32 
%offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4117,14 +4117,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(,,,,,, half*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4135,7 +4135,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4154,14 +4154,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(,,,,,,, half*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4172,7 +4172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4192,14 +4192,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(,,,,,,,, half*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv4f16(half* %base, i32 %offset, i32 %vl) { @@ -4210,7 +4210,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4231,14 +4231,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f16(half*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv2f16(,, half*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i32, , i32, i32) define @test_vlsseg2_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4264,14 +4264,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f16(half*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv2f16(,,, half*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i32, , i32, i32) define @test_vlsseg3_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4282,7 +4282,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4298,14 +4298,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(half*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(,,,, half*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i32, , i32, i32) define @test_vlsseg4_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4316,7 +4316,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4333,14 +4333,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half*, i32, i32) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(,,,,, half*, i32, i32) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i32, , i32, i32) define @test_vlsseg5_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4351,7 +4351,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4369,14 +4369,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half*, i32, i32) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(,,,,,, half*, i32, i32) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i32, , i32, i32) define @test_vlsseg6_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4387,7 +4387,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4406,14 +4406,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half*, i32, i32) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(,,,,,,, half*, i32, i32) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i32, , i32, i32) define @test_vlsseg7_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4424,7 +4424,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4444,14 +4444,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half*, i32, i32) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(,,,,,,,, half*, i32, i32) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i32, , i32, i32) define @test_vlsseg8_nxv2f16(half* %base, i32 %offset, i32 %vl) { @@ -4462,7 +4462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4483,14 +4483,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f32(float*, i32, i32) +declare {,} @llvm.riscv.vlsseg2.nxv4f32(,, float*, i32, i32) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i32, , i32, i32) define @test_vlsseg2_nxv4f32(float* %base, i32 %offset, i32 %vl) { @@ -4501,7 +4501,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4516,14 +4516,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f32(float*, i32, i32) +declare {,,} @llvm.riscv.vlsseg3.nxv4f32(,,, float*, i32, i32) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i32, , i32, i32) define @test_vlsseg3_nxv4f32(float* %base, i32 %offset, i32 %vl) { @@ -4534,7 +4534,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4550,14 +4550,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(float*, i32, i32) +declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(,,,, float*, i32, i32) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i32, , i32, i32) define @test_vlsseg4_nxv4f32(float* %base, i32 %offset, i32 %vl) { @@ -4568,7 +4568,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4585,7 +4585,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, float* %base, i32 %offset, i32 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i32 %offset, %mask, i32 %vl, i32 1) %3 = extractvalue {,,,} %2, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv16i16(,, i16*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -28,14 +28,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i32(i32*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4i32(,, i32*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv4i32(i32* %base, i64 %offset, i64 %vl) { @@ -46,7 +46,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -61,14 +61,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed 
$v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i32(i32*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv4i32(,,, i32*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv4i32(i32* %base, i64 %offset, i64 %vl) { @@ -79,7 +79,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -95,14 +95,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i32(,,,, i32*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv4i32(i32* %base, i64 %offset, i64 %vl) { @@ -113,7 +113,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -130,14 +130,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv16i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv16i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv16i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) { @@ -148,7 +148,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -163,14 +163,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} 
@llvm.riscv.vlsseg2.mask.nxv16i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv16i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv16i8(,,, i8*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv16i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) { @@ -181,7 +181,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -197,14 +197,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv16i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv16i8(,,,, i8*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) { @@ -215,7 +215,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -232,14 +232,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv16i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i64(i64*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1i64(,, i64*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i64(,, i64*, i64, , i64, i64) define @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -250,7 +250,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -265,14 +265,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i64(i64*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1i64(,,, i64*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i64(,,, i64*, i64, , i64, i64) define 
@test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -283,7 +283,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64( undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -299,14 +299,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64( undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i64(,,,, i64*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64(,,,, i64*, i64, , i64, i64) define @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -317,7 +317,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64( undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -334,14 +334,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64( undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i64(,,,,, i64*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64(,,,,, i64*, i64, , i64, i64) define @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -352,7 +352,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64( undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -370,14 +370,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64( undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i64( %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(,,,,,, i64*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64(,,,,,, i64*, i64, , i64, i64) define @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -388,7 +388,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -407,14 +407,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(,,,,,,, i64*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64(,,,,,,, i64*, i64, , i64, i64) define @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -425,7 +425,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -445,14 +445,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(,,,,,,,, i64*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64(,,,,,,,, i64*, i64, , i64, i64) define @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) { @@ -463,7 +463,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -484,14 +484,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i32(i32*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1i32(,, i32*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -502,7 +502,7 @@ ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -517,14 +517,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i32(i32*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1i32(,,, i32*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -535,7 +535,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -551,14 +551,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i32(,,,, i32*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -569,7 +569,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -586,14 +586,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i32(,,,,, i32*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32(,,,,, i32*, i64, , i64, i64) define @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -604,7 +604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -622,14 +622,14 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32( undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(,,,,,, i32*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32(,,,,,, i32*, i64, , i64, i64) define @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -640,7 +640,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -659,14 +659,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(,,,,,,, i32*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32(,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -677,7 +677,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -697,14 +697,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(,,,,,,,, i32*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32(,,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) { @@ -715,7 +715,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = 
extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -736,14 +736,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv8i16(,, i16*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) { @@ -754,7 +754,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -769,14 +769,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i16(i16*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv8i16(,,, i16*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) { @@ -787,7 +787,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -803,14 +803,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv8i16(,,,, i16*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) { @@ -821,7 +821,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -838,14 +838,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, 
i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -856,7 +856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -871,14 +871,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv4i8(,,, i8*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -889,7 +889,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -905,14 +905,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i8(,,,, i8*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -923,7 +923,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -940,14 +940,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8*, i64, i64) +declare {,,,,} 
@llvm.riscv.vlsseg5.nxv4i8(,,,,, i8*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -958,7 +958,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -976,14 +976,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(,,,,,, i8*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -994,7 +994,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1013,14 +1013,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(,,,,,,, i8*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1051,14 +1051,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(,,,,,,,, i8*, i64, i64) 
declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) { @@ -1069,7 +1069,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1090,14 +1090,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1i16(,, i16*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1108,7 +1108,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1123,14 +1123,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i16(i16*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1i16(,,, i16*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1141,7 +1141,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1157,14 +1157,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i16(,,,, i16*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1175,7 +1175,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1192,14 +1192,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i16(,,,,, i16*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1210,7 +1210,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1228,14 +1228,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(,,,,,, i16*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1246,7 +1246,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1265,14 +1265,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(,,,,,,, i16*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1283,7 +1283,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1303,14 +1303,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(,,,,,,,, i16*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) { @@ -1321,7 +1321,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1342,14 +1342,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i32(i32*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2i32(,, i32*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1375,14 +1375,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i32(i32*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2i32(,,, i32*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i32(,,, i32*, i64, , i64, i64) define @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1393,7 +1393,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 
= tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1409,14 +1409,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32( undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i32( %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i32(,,,, i32*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32(,,,, i32*, i64, , i64, i64) define @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1427,7 +1427,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1444,14 +1444,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32( undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i32( %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i32(,,,,, i32*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32(,,,,, i32*, i64, , i64, i64) define @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1462,7 +1462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1480,14 +1480,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32( undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i32( %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(,,,,,, i32*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32(,,,,,, i32*, i64, , i64, i64) define @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1498,7 +1498,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1517,14 +1517,14 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(,,,,,,, i32*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32(,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1555,14 +1555,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(,,,,,,,, i32*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32(,,,,,,,, i32*, i64, , i64, i64) define @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) { @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1594,14 +1594,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv8i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1612,7 +1612,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} 
%0, 1 ret %1 } @@ -1627,14 +1627,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv8i8(,,, i8*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1645,7 +1645,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1661,14 +1661,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv8i8(,,,, i8*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1679,7 +1679,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1696,14 +1696,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv8i8(,,,,, i8*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1714,7 +1714,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1732,14 +1732,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = 
extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv8i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(,,,,,, i8*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1750,7 +1750,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1769,14 +1769,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(,,,,,,, i8*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1787,7 +1787,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1807,14 +1807,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(,,,,,,,, i8*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) { @@ -1825,7 +1825,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1846,14 +1846,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8( undef, undef , undef , undef, undef , undef, 
undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i64(i64*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4i64(,, i64*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i64(,, i64*, i64, , i64, i64) define @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) { @@ -1864,7 +1864,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1879,14 +1879,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4i16(,, i16*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1912,14 +1912,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4i16(i16*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv4i16(,,, i16*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -1930,7 +1930,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1946,14 +1946,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv4i16(,,,, i16*, i64, i64) declare {,,,} 
@llvm.riscv.vlsseg4.mask.nxv4i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -1964,7 +1964,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1981,14 +1981,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv4i16(,,,,, i16*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -1999,7 +1999,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2017,14 +2017,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(,,,,,, i16*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -2035,7 +2035,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2054,14 +2054,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(,,,,,,, i16*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv4i16(i16* 
%base, i64 %offset, i64 %vl) { @@ -2072,7 +2072,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2092,14 +2092,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(,,,,,,,, i16*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) { @@ -2110,7 +2110,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2131,14 +2131,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2149,7 +2149,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2164,14 +2164,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1i8(,,, i8*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2198,14 +2198,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1i8(,,,, i8*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2216,7 +2216,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2233,14 +2233,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1i8(,,,,, i8*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2251,7 +2251,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2269,14 +2269,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(,,,,,, i8*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2287,7 +2287,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, 
i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2306,14 +2306,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(,,,,,,, i8*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2324,7 +2324,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2344,14 +2344,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(,,,,,,,, i8*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) { @@ -2362,7 +2362,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2383,14 +2383,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2401,7 +2401,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, i8* 
%base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2416,14 +2416,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i8(i8*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2i8(,,, i8*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i8(,,, i8*, i64, , i64, i64) define @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2434,7 +2434,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2450,14 +2450,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8( undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i8( %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i8(,,,, i8*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8(,,,, i8*, i64, , i64, i64) define @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2468,7 +2468,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2485,14 +2485,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8( undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i8( %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i8(,,,,, i8*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8(,,,,, i8*, i64, , i64, i64) define @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2503,7 +2503,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2521,14 +2521,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8( undef, undef, undef, 
undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i8( %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(,,,,,, i8*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8(,,,,,, i8*, i64, , i64, i64) define @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2539,7 +2539,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2558,14 +2558,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(,,,,,,, i8*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8(,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2576,7 +2576,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2596,14 +2596,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(,,,,,,,, i8*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8(,,,,,,,, i8*, i64, , i64, i64) define @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) { @@ -2614,7 +2614,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2635,14 +2635,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8i32(i32*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv8i32(,, i32*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv8i32(,, i32*, i64, , i64, i64) define @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) { @@ -2653,7 +2653,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2668,14 +2668,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32( undef, undef, i32* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8i32( %1, %1, i32* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv32i8(i8*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv32i8(,, i8*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv32i8(,, i8*, i64, , i64, i64) define @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) { @@ -2686,7 +2686,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2701,14 +2701,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8( undef, undef, i8* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv32i8( %1, %1, i8* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i16(i16*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2i16(,, i16*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i16(,, i16*, i64, , i64, i64) define @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2719,7 +2719,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2734,14 +2734,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16( undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i16( %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i16(i16*, i64, i64) +declare {,,} 
@llvm.riscv.vlsseg3.nxv2i16(,,, i16*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i16(,,, i16*, i64, , i64, i64) define @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2752,7 +2752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2768,14 +2768,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16( undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i16( %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i16(,,,, i16*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16(,,,, i16*, i64, , i64, i64) define @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2786,7 +2786,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2803,14 +2803,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16( undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i16( %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2i16(,,,,, i16*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16(,,,,, i16*, i64, , i64, i64) define @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2821,7 +2821,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2839,14 +2839,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16( undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2i16( %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(,,,,,, i16*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16(,,,,,, i16*, i64, , i64, i64) define @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2857,7 +2857,7 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2876,14 +2876,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(,,,,,,, i16*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16(,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2894,7 +2894,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2914,14 +2914,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(,,,,,,,, i16*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16(,,,,,,,, i16*, i64, , i64, i64) define @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) { @@ -2932,7 +2932,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2953,14 +2953,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2i64(i64*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2i64(,, i64*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2i64(,, i64*, 
i64, , i64, i64) define @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) { @@ -2971,7 +2971,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2986,14 +2986,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64( undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2i64( %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2i64(i64*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2i64(,,, i64*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2i64(,,, i64*, i64, , i64, i64) define @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64( undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3020,14 +3020,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64( undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2i64( %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2i64(,,,, i64*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64(,,,, i64*, i64, , i64, i64) define @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) { @@ -3038,7 +3038,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64( undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3055,14 +3055,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64( undef, undef, undef, undef, i64* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2i64( %1, %1, %1, %1, i64* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv16f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv16f16(,, half*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv16f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) { @@ -3073,7 +3073,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 
%vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3088,14 +3088,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4f64(,, double*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) { @@ -3106,7 +3106,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3121,14 +3121,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1f64(,, double*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3139,7 +3139,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3154,14 +3154,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f64(double*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1f64(,,, double*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f64(,,, double*, i64, , i64, i64) define @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3172,7 +3172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3188,14 +3188,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) + 
%0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64( undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(double*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1f64(,,,, double*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64(,,,, double*, i64, , i64, i64) define @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3206,7 +3206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3223,14 +3223,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64( undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f64(,,,,, double*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64(,,,,, double*, i64, , i64, i64) define @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3241,7 +3241,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3259,14 +3259,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64( undef, undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f64( %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(,,,,,, double*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64(,,,,,, double*, i64, , i64, i64) define @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3277,7 +3277,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3296,14 +3296,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64( undef, undef, undef, 
undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f64( %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(,,,,,,, double*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64(,,,,,,, double*, i64, , i64, i64) define @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3314,7 +3314,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3334,14 +3334,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64( undef, undef, undef, undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(,,,,,,,, double*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64(,,,,,,,, double*, i64, , i64, i64) define @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) { @@ -3352,7 +3352,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3373,14 +3373,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2f32(,, float*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3391,7 +3391,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3406,14 +3406,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f32(float*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2f32(,,, float*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f32(,,, float*, i64, , i64, i64) define @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3424,7 +3424,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3440,14 +3440,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(float*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f32(,,,, float*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3458,7 +3458,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3475,14 +3475,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f32(,,,,, float*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32(,,,,, float*, i64, , i64, i64) define @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3493,7 +3493,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3511,14 +3511,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32( undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 
= tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(,,,,,, float*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32(,,,,,, float*, i64, , i64, i64) define @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3529,7 +3529,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3548,14 +3548,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(,,,,,,, float*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32(,,,,,,, float*, i64, , i64, i64) define @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3566,7 +3566,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3586,14 +3586,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(,,,,,,,, float*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32(,,,,,,,, float*, i64, , i64, i64) define @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) { @@ -3604,7 +3604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3625,14 +3625,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call 
{,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1f16(,, half*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3643,7 +3643,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3658,14 +3658,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f16(half*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1f16(,,, half*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3676,7 +3676,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3692,14 +3692,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv1f16(,,,, half*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3710,7 +3710,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3727,14 +3727,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = 
extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f16(,,,,, half*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3745,7 +3745,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3763,14 +3763,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(,,,,,, half*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3781,7 +3781,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3800,14 +3800,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(,,,,,,, half*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3838,14 +3838,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, 
%mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(,,,,,,,, half*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv1f16(half* %base, i64 %offset, i64 %vl) { @@ -3856,7 +3856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3877,14 +3877,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv1f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv1f32(,, float*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv1f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3910,14 +3910,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv1f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv1f32(float*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv1f32(,,, float*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv1f32(,,, float*, i64, , i64, i64) define @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -3928,7 +3928,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3944,14 +3944,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv1f32( %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv1f32(float*, i64, i64) +declare 
{,,,} @llvm.riscv.vlsseg4.nxv1f32(,,,, float*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3979,14 +3979,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv1f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv1f32(,,,,, float*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32(,,,,, float*, i64, , i64, i64) define @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -3997,7 +3997,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4015,14 +4015,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32( undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv1f32( %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(,,,,,, float*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32(,,,,,, float*, i64, , i64, i64) define @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -4033,7 +4033,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4052,14 +4052,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32( undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv1f32( %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(,,,,,,, float*, 
i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32(,,,,,,, float*, i64, , i64, i64) define @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -4070,7 +4070,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4090,14 +4090,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32( undef, undef, undef, undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(,,,,,,,, float*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32(,,,,,,,, float*, i64, , i64, i64) define @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) { @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4129,14 +4129,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv1f32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv8f16(,, half*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv8f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) { @@ -4147,7 +4147,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4162,14 +4162,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv8f16(half*, i64, i64) +declare {,,} 
@llvm.riscv.vlsseg3.nxv8f16(,,, half*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv8f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) { @@ -4180,7 +4180,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4196,14 +4196,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv8f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv8f16(,,,, half*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) { @@ -4214,7 +4214,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4231,14 +4231,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv8f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv8f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv8f32(,, float*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv8f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4264,14 +4264,14 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv8f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f64(double*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2f64(,, double*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f64(,, double*, i64, , i64, i64) define @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) { @@ -4282,7 +4282,7 @@ ; CHECK-NEXT: # kill: def $v8m2 
killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4297,14 +4297,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64( undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f64( %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f64(double*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2f64(,,, double*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f64(,,, double*, i64, , i64, i64) define @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) { @@ -4315,7 +4315,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4331,14 +4331,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64( undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f64( %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(double*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f64(,,,, double*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64(,,,, double*, i64, , i64, i64) define @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) { @@ -4349,7 +4349,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4366,14 +4366,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64( undef, undef, undef, undef, double* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f64( %1, %1, %1, %1, double* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4f16(,, half*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4384,7 +4384,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, half* %base, 
i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4399,14 +4399,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f16(half*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv4f16(,,, half*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4417,7 +4417,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4433,14 +4433,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv4f16(,,,, half*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4451,7 +4451,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4468,14 +4468,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv4f16(,,,,, half*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4486,7 +4486,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4504,14 +4504,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl) + 
%0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv4f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(,,,,,, half*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4522,7 +4522,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4541,14 +4541,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv4f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(,,,,,,, half*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4559,7 +4559,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4579,14 +4579,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(,,,,,,,, half*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) { @@ -4597,7 +4597,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4618,14 +4618,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv4f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv2f16(half*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv2f16(,, half*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv2f16(,, half*, i64, , i64, i64) define @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4636,7 +4636,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4651,14 +4651,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16( undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv2f16( %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv2f16(half*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv2f16(,,, half*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv2f16(,,, half*, i64, , i64, i64) define @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4669,7 +4669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4685,14 +4685,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16( undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv2f16( %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(half*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv2f16(,,,, half*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16(,,,, half*, i64, , i64, i64) define @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4703,7 +4703,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4720,14 +4720,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16( undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 
0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv2f16( %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 ret %3 } -declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half*, i64, i64) +declare {,,,,} @llvm.riscv.vlsseg5.nxv2f16(,,,,, half*, i64, i64) declare {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16(,,,,, half*, i64, , i64, i64) define @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4738,7 +4738,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4756,14 +4756,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16( undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,} %0, 0 %2 = tail call {,,,,} @llvm.riscv.vlsseg5.mask.nxv2f16( %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,} %2, 1 ret %3 } -declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half*, i64, i64) +declare {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(,,,,,, half*, i64, i64) declare {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16(,,,,,, half*, i64, , i64, i64) define @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4774,7 +4774,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4793,14 +4793,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16( undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,} %0, 0 %2 = tail call {,,,,,} @llvm.riscv.vlsseg6.mask.nxv2f16( %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,} %2, 1 ret %3 } -declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half*, i64, i64) +declare {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(,,,,,,, half*, i64, i64) declare {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16(,,,,,,, half*, i64, , i64, i64) define @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4811,7 +4811,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4831,14 +4831,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16( undef, undef, undef, undef, undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue 
{,,,,,,} %0, 0 %2 = tail call {,,,,,,} @llvm.riscv.vlsseg7.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,} %2, 1 ret %3 } -declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half*, i64, i64) +declare {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(,,,,,,,, half*, i64, i64) declare {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16(,,,,,,,, half*, i64, , i64, i64) define @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) { @@ -4849,7 +4849,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4870,14 +4870,14 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 0 %2 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.mask.nxv2f16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,,,,,} %2, 1 ret %3 } -declare {,} @llvm.riscv.vlsseg2.nxv4f32(float*, i64, i64) +declare {,} @llvm.riscv.vlsseg2.nxv4f32(,, float*, i64, i64) declare {,} @llvm.riscv.vlsseg2.mask.nxv4f32(,, float*, i64, , i64, i64) define @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) { @@ -4888,7 +4888,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4903,14 +4903,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32( undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,} %0, 0 %2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv4f32( %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,} %2, 1 ret %3 } -declare {,,} @llvm.riscv.vlsseg3.nxv4f32(float*, i64, i64) +declare {,,} @llvm.riscv.vlsseg3.nxv4f32(,,, float*, i64, i64) declare {,,} @llvm.riscv.vlsseg3.mask.nxv4f32(,,, float*, i64, , i64, i64) define @test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) { @@ -4921,7 +4921,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4937,14 +4937,14 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32( undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,} %0, 0 %2 = tail call {,,} @llvm.riscv.vlsseg3.mask.nxv4f32( %1, %1, 
%1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,} %2, 1 ret %3 } -declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(float*, i64, i64) +declare {,,,} @llvm.riscv.vlsseg4.nxv4f32(,,,, float*, i64, i64) declare {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32(,,,, float*, i64, , i64, i64) define @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) { @@ -4955,7 +4955,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4972,7 +4972,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32( undef, undef, undef, undef, float* %base, i64 %offset, i64 %vl) %1 = extractvalue {,,,} %0, 0 %2 = tail call {,,,} @llvm.riscv.vlsseg4.mask.nxv4f32( %1, %1, %1, %1, float* %base, i64 %offset, %mask, i64 %vl, i64 1) %3 = extractvalue {,,,} %2, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -2,7 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \ ; RUN: -verify-machineinstrs < %s | FileCheck %s -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { @@ -13,7 +13,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -32,7 +32,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { @@ -43,7 +43,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -62,7 +62,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { @@ -73,7 +73,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -92,7 +92,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(,, i8*, , i32) declare {,} 
@llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -103,7 +103,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -122,7 +122,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -133,7 +133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -152,7 +152,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -163,7 +163,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -182,7 +182,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -193,7 +193,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -214,7 +214,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -225,7 +225,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -246,7 +246,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -257,7 +257,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -278,7 +278,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,} 
@llvm.riscv.vluxseg4.nxv1i8.nxv1i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -289,7 +289,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -311,7 +311,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -322,7 +322,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -344,7 +344,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -355,7 +355,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -377,7 +377,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -388,7 +388,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -411,7 +411,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -422,7 +422,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -445,7 +445,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -456,7 +456,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -479,7 +479,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -490,7 +490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -514,7 +514,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -525,7 +525,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -549,7 +549,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -560,7 +560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -584,7 +584,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -595,7 +595,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -620,7 +620,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -631,7 +631,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -656,7 +656,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -667,7 +667,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -692,7 +692,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { @@ -703,7 +703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -729,7 +729,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { @@ -740,7 +740,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -766,7 +766,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { @@ -777,7 +777,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -803,7 +803,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -814,7 +814,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -833,7 +833,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -844,7 +844,7 @@ ; CHECK-NEXT: vmv2r.v 
v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -863,7 +863,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -874,7 +874,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -893,7 +893,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -904,7 +904,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -924,7 +924,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -935,7 +935,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -956,7 +956,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -967,7 +967,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -987,7 +987,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { @@ -998,7 +998,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1020,7 +1020,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(,,,, i8*, , i32) declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1053,7 +1053,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { @@ -1064,7 +1064,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1085,7 +1085,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1115,7 +1115,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1126,7 +1126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1145,7 +1145,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1175,7 +1175,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1186,7 +1186,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1207,7 
+1207,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1218,7 +1218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1239,7 +1239,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1250,7 +1250,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1271,7 +1271,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1304,7 +1304,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1315,7 +1315,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1337,7 +1337,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1348,7 +1348,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1370,7 +1370,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1381,7 +1381,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail 
call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1404,7 +1404,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1415,7 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1438,7 +1438,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1449,7 +1449,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1472,7 +1472,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1483,7 +1483,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1507,7 +1507,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1518,7 +1518,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1542,7 +1542,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1553,7 +1553,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ 
-1577,7 +1577,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1588,7 +1588,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1613,7 +1613,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1624,7 +1624,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1649,7 +1649,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1660,7 +1660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1685,7 +1685,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { @@ -1696,7 +1696,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1722,7 +1722,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { @@ -1733,7 +1733,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1759,7 +1759,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32*, , i32) +declare 
{,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { @@ -1770,7 +1770,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1796,7 +1796,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1807,7 +1807,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1826,7 +1826,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -1837,7 +1837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1856,7 +1856,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -1867,7 +1867,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1886,7 +1886,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1897,7 +1897,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1918,7 +1918,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -1929,7 +1929,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv4i16.nxv4i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1950,7 +1950,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -1961,7 +1961,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1981,7 +1981,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -1992,7 +1992,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2014,7 +2014,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2025,7 +2025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2047,7 +2047,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2058,7 +2058,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2080,7 +2080,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2091,7 +2091,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2114,7 +2114,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , 
i32, i32) define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2125,7 +2125,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2148,7 +2148,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2159,7 +2159,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2182,7 +2182,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2193,7 +2193,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2217,7 +2217,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2228,7 +2228,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2252,7 +2252,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2263,7 +2263,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2287,7 +2287,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2298,7 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, 
i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2323,7 +2323,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2334,7 +2334,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2359,7 +2359,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2370,7 +2370,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2395,7 +2395,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { @@ -2406,7 +2406,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2432,7 +2432,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { @@ -2443,7 +2443,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2469,7 +2469,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { @@ -2480,7 +2480,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, 
undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2506,7 +2506,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2536,7 +2536,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2547,7 +2547,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2566,7 +2566,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2577,7 +2577,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2596,7 +2596,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2607,7 +2607,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2628,7 +2628,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2639,7 +2639,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2660,7 +2660,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(,,, i32*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32, i32) define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2671,7 +2671,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16( undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2692,7 +2692,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2703,7 +2703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2725,7 +2725,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2736,7 +2736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2758,7 +2758,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(,,,, i32*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32, i32) define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2769,7 +2769,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2791,7 +2791,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2802,7 +2802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2825,7 +2825,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2836,7 +2836,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2859,7 +2859,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32*, , i32) +declare 
{,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32, i32) define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2870,7 +2870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2893,7 +2893,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -2904,7 +2904,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2928,7 +2928,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -2939,7 +2939,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2963,7 +2963,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32, i32) define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -2974,7 +2974,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2998,7 +2998,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -3009,7 +3009,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3034,7 +3034,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 
%vl) { @@ -3045,7 +3045,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3070,7 +3070,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32, i32) define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -3081,7 +3081,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3106,7 +3106,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { @@ -3117,7 +3117,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3143,7 +3143,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { @@ -3154,7 +3154,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3180,7 +3180,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(,,,,,,,, i32*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32, i32) define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { @@ -3191,7 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3217,7 +3217,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3228,7 +3228,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3247,7 +3247,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3258,7 +3258,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3277,7 +3277,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3288,7 +3288,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3307,7 +3307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3318,7 +3318,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3339,7 +3339,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3350,7 +3350,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3371,7 +3371,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3382,7 +3382,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3402,7 +3402,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32, i32) define 
@test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { @@ -3413,7 +3413,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3435,7 +3435,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { @@ -3446,7 +3446,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3468,7 +3468,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { @@ -3479,7 +3479,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3501,7 +3501,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3512,7 +3512,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3531,7 +3531,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3542,7 +3542,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3561,7 +3561,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3572,7 +3572,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3591,7 +3591,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8*, , i32) +declare {,,} 
@llvm.riscv.vluxseg3.nxv8i8.nxv8i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3602,7 +3602,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3622,7 +3622,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3633,7 +3633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3654,7 +3654,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3665,7 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3685,7 +3685,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3696,7 +3696,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3718,7 +3718,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3729,7 +3729,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3751,7 +3751,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3762,7 +3762,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, i8* %base, %index, i32 
%vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3783,7 +3783,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3794,7 +3794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3817,7 +3817,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3828,7 +3828,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3851,7 +3851,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3862,7 +3862,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3884,7 +3884,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -3895,7 +3895,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3919,7 +3919,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -3930,7 +3930,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3954,7 +3954,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i32, i32) define 
@test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -3965,7 +3965,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3989,7 +3989,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -4000,7 +4000,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4025,7 +4025,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -4036,7 +4036,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4061,7 +4061,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -4072,7 +4072,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4097,7 +4097,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4134,7 +4134,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { @@ -4145,7 +4145,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4171,7 +4171,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { @@ -4182,7 +4182,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4208,7 +4208,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { @@ -4219,7 +4219,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4238,7 +4238,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4268,7 +4268,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(,, i32*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i32, i32) define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { @@ -4279,7 +4279,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32( undef, undef, i32* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4298,7 +4298,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4309,7 +4309,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4328,7 +4328,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(,, i8*, , i32) declare {,} 
@llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4339,7 +4339,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4358,7 +4358,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4369,7 +4369,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4388,7 +4388,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4399,7 +4399,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4420,7 +4420,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4431,7 +4431,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4452,7 +4452,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(,,, i8*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32, i32) define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4463,7 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32( undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4483,7 +4483,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4494,7 +4494,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4516,7 +4516,7 @@ ret %1 } -declare {,,,} 
@llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4527,7 +4527,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4549,7 +4549,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(,,,, i8*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32, i32) define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4560,7 +4560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4582,7 +4582,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4593,7 +4593,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4616,7 +4616,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4627,7 +4627,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4650,7 +4650,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32, i32) define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4661,7 +4661,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4684,7 +4684,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4695,7 +4695,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4719,7 +4719,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4730,7 +4730,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4754,7 +4754,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32, i32) define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4765,7 +4765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4789,7 +4789,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4800,7 +4800,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4825,7 +4825,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4836,7 +4836,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4861,7 +4861,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32, i32) define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4872,7 +4872,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } 
@@ -4897,7 +4897,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { @@ -4908,7 +4908,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4934,7 +4934,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { @@ -4945,7 +4945,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4971,7 +4971,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32, i32) define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { @@ -4982,7 +4982,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5008,7 +5008,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32, i32) define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5019,7 +5019,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5038,7 +5038,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32, i32) define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5068,7 +5068,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(,, i16*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32, i32) define 
@test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5079,7 +5079,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16( undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5098,7 +5098,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5109,7 +5109,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5130,7 +5130,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5141,7 +5141,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5162,7 +5162,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(,,, i16*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32, i32) define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5173,7 +5173,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16( undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5194,7 +5194,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5205,7 +5205,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5227,7 +5227,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5238,7 +5238,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5260,7 +5260,7 @@ ret %1 } -declare {,,,} 
@llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(,,,, i16*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32, i32) define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5271,7 +5271,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5293,7 +5293,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5304,7 +5304,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5327,7 +5327,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5338,7 +5338,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5361,7 +5361,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32, i32) define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5372,7 +5372,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5395,7 +5395,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5406,7 +5406,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5430,7 +5430,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5441,7 
+5441,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5465,7 +5465,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32, i32) define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5476,7 +5476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5500,7 +5500,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5511,7 +5511,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5536,7 +5536,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5547,7 +5547,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5572,7 +5572,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32, i32) define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5583,7 +5583,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5608,7 +5608,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { @@ -5619,7 +5619,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) + %0 = 
tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5645,7 +5645,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { @@ -5656,7 +5656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5682,7 +5682,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32, i32) define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { @@ -5693,7 +5693,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5719,7 +5719,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i32, i32) define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { @@ -5730,7 +5730,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5749,7 +5749,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32, i32) define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { @@ -5760,7 +5760,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5779,7 +5779,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(,, i8*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32, i32) define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { @@ -5790,7 +5790,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32( undef, undef, i8* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5809,7 +5809,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(,, i8*, , i32) 
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)

 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
@@ -5820,7 +5820,7 @@
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, i8* %base, <vscale x 2 x i8> %index, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }

[hunks @@ -5839,7 through @@ -9591,7 repeat the same mechanical update for every vluxseg2 through vluxseg8 variant exercised in this file: each unmasked @llvm.riscv.vluxsegN.* declaration gains N leading passthru operands, and each corresponding tail call passes N undef passthru arguments; the affected result types are nxv2i8, nxv2i16, nxv4i32, nxv16f16, nxv4f64, nxv1f64, nxv2f32, and nxv1f16, each with its 8-, 16-, and 32-bit index variants]

@@ -9616,7 +9616,7 @@
   ret <vscale x 1 x half> %1
 }

-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)

 define
@test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9627,7 +9627,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9652,7 +9652,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9663,7 +9663,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9688,7 +9688,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9725,7 +9725,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { @@ -9736,7 +9736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9762,7 +9762,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { @@ -9773,7 +9773,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9799,7 +9799,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9810,7 +9810,7 
@@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9829,7 +9829,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -9840,7 +9840,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9859,7 +9859,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -9870,7 +9870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9889,7 +9889,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9900,7 +9900,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9921,7 +9921,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -9932,7 +9932,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9953,7 +9953,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32, i32) define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -9964,7 +9964,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9985,7 +9985,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float*, , i32) +declare {,,,} 
@llvm.riscv.vluxseg4.nxv1f32.nxv1i8(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -9996,7 +9996,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10018,7 +10018,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10029,7 +10029,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10051,7 +10051,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10062,7 +10062,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10084,7 +10084,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10095,7 +10095,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10118,7 +10118,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10129,7 +10129,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10152,7 +10152,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(,,,,, float*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32, i32) define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10163,7 +10163,7 @@ ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10186,7 +10186,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10197,7 +10197,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10221,7 +10221,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10232,7 +10232,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10256,7 +10256,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32, i32) define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10267,7 +10267,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10291,7 +10291,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10302,7 +10302,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10327,7 +10327,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10338,7 +10338,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 
= tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10363,7 +10363,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32, i32) define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10374,7 +10374,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10399,7 +10399,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { @@ -10410,7 +10410,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10436,7 +10436,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { @@ -10447,7 +10447,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10473,7 +10473,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(,,,,,,,, float*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32, i32) define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { @@ -10484,7 +10484,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10510,7 +10510,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { @@ -10521,7 +10521,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv8f16.nxv8i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10540,7 +10540,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10551,7 +10551,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10570,7 +10570,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10581,7 +10581,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10600,7 +10600,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { @@ -10611,7 +10611,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10632,7 +10632,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10643,7 +10643,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10664,7 +10664,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10675,7 +10675,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10695,7 +10695,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, 
%index, i32 %vl) { @@ -10706,7 +10706,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10728,7 +10728,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { @@ -10739,7 +10739,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10761,7 +10761,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { @@ -10772,7 +10772,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10794,7 +10794,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { @@ -10805,7 +10805,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10824,7 +10824,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { @@ -10835,7 +10835,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10854,7 +10854,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { @@ -10865,7 +10865,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10884,7 +10884,7 @@ ret %1 } -declare {,} 
@llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(,, double*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -10895,7 +10895,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10914,7 +10914,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(,, double*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -10925,7 +10925,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10944,7 +10944,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(,, double*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32, i32) define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -10955,7 +10955,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16( undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10974,7 +10974,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(,,, double*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -10985,7 +10985,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11006,7 +11006,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(,,, double*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -11017,7 +11017,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11038,7 +11038,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(,,, double*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32, i32) define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -11049,7 +11049,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16( undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11070,7 +11070,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(,,,, double*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { @@ -11081,7 +11081,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11103,7 +11103,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(,,,, double*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { @@ -11114,7 +11114,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11136,7 +11136,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(,,,, double*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32, i32) define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { @@ -11147,7 +11147,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, double* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11169,7 +11169,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11180,7 +11180,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11199,7 +11199,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11210,7 +11210,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11229,7 +11229,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half*, , i32) +declare {,} 
@llvm.riscv.vluxseg2.nxv4f16.nxv4i32(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11240,7 +11240,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11259,7 +11259,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11270,7 +11270,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11291,7 +11291,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11302,7 +11302,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11323,7 +11323,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11354,7 +11354,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11365,7 +11365,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11387,7 +11387,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11398,7 +11398,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail 
call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11420,7 +11420,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11431,7 +11431,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11453,7 +11453,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11464,7 +11464,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11487,7 +11487,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11498,7 +11498,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11521,7 +11521,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11532,7 +11532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11555,7 +11555,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11566,7 +11566,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11590,7 +11590,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half*, , 
i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11601,7 +11601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11625,7 +11625,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11636,7 +11636,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -11660,7 +11660,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11671,7 +11671,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11696,7 +11696,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11707,7 +11707,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11732,7 +11732,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11743,7 +11743,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -11768,7 +11768,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i32) declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { @@ -11779,7 +11779,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11805,7 +11805,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { @@ -11816,7 +11816,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11842,7 +11842,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { @@ -11853,7 +11853,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -11879,7 +11879,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -11890,7 +11890,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11909,7 +11909,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -11920,7 +11920,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11939,7 +11939,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(,, half*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32, i32) define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -11950,7 +11950,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16( undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11969,7 +11969,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -11980,7 +11980,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12001,7 +12001,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12012,7 +12012,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12033,7 +12033,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(,,, half*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32, i32) define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12044,7 +12044,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16( undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12065,7 +12065,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12076,7 +12076,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12098,7 +12098,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12109,7 +12109,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12131,7 +12131,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half*, , i32) +declare {,,,} 
@llvm.riscv.vluxseg4.nxv2f16.nxv2i16(,,,, half*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32, i32) define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12142,7 +12142,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12164,7 +12164,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12175,7 +12175,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12198,7 +12198,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12209,7 +12209,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12232,7 +12232,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(,,,,, half*, , i32) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32, i32) define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12243,7 +12243,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12266,7 +12266,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12277,7 +12277,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12301,7 +12301,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12312,7 +12312,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12336,7 +12336,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i32) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32, i32) define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12347,7 +12347,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12371,7 +12371,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12382,7 +12382,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12407,7 +12407,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12418,7 +12418,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12443,7 +12443,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i32) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32, i32) define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12454,7 +12454,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12479,7 +12479,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { @@ -12490,7 +12490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12516,7 +12516,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { @@ -12527,7 +12527,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12553,7 +12553,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half*, , i32) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i32) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32, i32) define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { @@ -12564,7 +12564,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12590,7 +12590,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12601,7 +12601,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12620,7 +12620,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12631,7 +12631,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12650,7 +12650,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float*, , i32) +declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(,, float*, , i32) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32, i32) define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12661,7 +12661,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32( undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ 
-12680,7 +12680,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12691,7 +12691,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12712,7 +12712,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12723,7 +12723,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12744,7 +12744,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float*, , i32) +declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(,,, float*, , i32) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32, i32) define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12755,7 +12755,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32( undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12776,7 +12776,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { @@ -12787,7 +12787,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12809,7 +12809,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { @@ -12820,7 +12820,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, float* %base, %index, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12842,7 +12842,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float*, , i32) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(,,,, float*, , i32) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32, i32) define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { @@ -12853,7 
+12853,7 @@
 ; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, float* %base, %index, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+experimental-zvfh \
 ; RUN: -verify-machineinstrs < %s | FileCheck %s

-declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, i16*, , i64)
 declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64)

 define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) {
@@ -13,7 +13,7 @@
 ; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i64 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
@@ -32,7 +32,7 @@
 ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(,, i16*, , i64)
 declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64, i64)

 define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) {
@@ -43,7 +43,7 @@
 ; CHECK-NEXT: vmv4r.v v8, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8( undef, undef, i16* %base, %index, i64 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
@@ -62,7 +62,7 @@
 ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(,, i16*, , i64)
 declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64, i64)

 define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) {
@@ -73,7 +73,7 @@
 ; CHECK-NEXT: vmv4r.v v8, v20
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32( undef, undef, i16* %base, %index, i64 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
@@ -92,7 +92,7 @@
 ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(,, i32*, , i64)
 declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64, i64)

 define @test_vluxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) {
@@ -103,7 +103,7 @@
 ; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32( undef, undef, i32* %base, %index, i64 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
@@ -122,7 +122,7 @@
 ret %1
 }

-declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(,, i32*, , i64)
 declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64, i64)

 define @test_vluxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) {
@@ -133,7 +133,7 @@
 ; CHECK-NEXT: vmv2r.v v8, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl)
+ %0 = tail call {,}
@llvm.riscv.vluxseg2.nxv4i32.nxv4i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -152,7 +152,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -163,7 +163,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -182,7 +182,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -193,7 +193,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -212,7 +212,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { @@ -223,7 +223,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -244,7 +244,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { @@ -255,7 +255,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -276,7 +276,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -287,7 +287,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -307,7 +307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -318,7 +318,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -339,7 +339,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { @@ -350,7 +350,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -372,7 +372,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { @@ -383,7 +383,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -405,7 +405,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { @@ -416,7 +416,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -438,7 +438,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { @@ -449,7 +449,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -471,7 +471,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -482,7 +482,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -501,7 +501,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(,, i8*, , i64) declare {,} 
@llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -512,7 +512,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -531,7 +531,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -542,7 +542,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -561,7 +561,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -572,7 +572,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -592,7 +592,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -603,7 +603,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -624,7 +624,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -635,7 +635,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -655,7 +655,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { @@ -666,7 +666,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -688,7 +688,7 @@ ret %1 } 
-declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -721,7 +721,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { @@ -732,7 +732,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -753,7 +753,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -764,7 +764,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -783,7 +783,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -794,7 +794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -813,7 +813,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -824,7 +824,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -843,7 +843,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -854,7 +854,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv1i64.nxv1i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -873,7 +873,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -884,7 +884,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -905,7 +905,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -916,7 +916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -937,7 +937,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -948,7 +948,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -969,7 +969,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -980,7 +980,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1001,7 +1001,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1012,7 +1012,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1034,7 +1034,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ 
-1045,7 +1045,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1067,7 +1067,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1078,7 +1078,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1100,7 +1100,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1111,7 +1111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1133,7 +1133,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1144,7 +1144,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1167,7 +1167,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1178,7 +1178,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1201,7 +1201,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1212,7 +1212,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } 
@@ -1235,7 +1235,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(,,,,, i64*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64, i64) define @test_vluxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1246,7 +1246,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -1269,7 +1269,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1280,7 +1280,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1304,7 +1304,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1315,7 +1315,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1339,7 +1339,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1350,7 +1350,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1374,7 +1374,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(,,,,,, i64*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64, i64) define @test_vluxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1385,7 +1385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -1409,7 +1409,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(,,,,,,, i64*, , i64) declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1420,7 +1420,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1445,7 +1445,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { @@ -1456,7 +1456,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1481,7 +1481,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1492,7 +1492,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1517,7 +1517,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(,,,,,,, i64*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64, i64) define @test_vluxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1528,7 +1528,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -1553,7 +1553,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { @@ -1564,7 +1564,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1590,7 +1590,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i32(i64* 
%base, %index, i64 %vl) { @@ -1601,7 +1601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1627,7 +1627,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { @@ -1638,7 +1638,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1664,7 +1664,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(,,,,,,,, i64*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64, i64) define @test_vluxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { @@ -1675,7 +1675,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -1701,7 +1701,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1712,7 +1712,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1731,7 +1731,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1742,7 +1742,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1761,7 +1761,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16( undef, undef, 
i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1791,7 +1791,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -1802,7 +1802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -1821,7 +1821,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1832,7 +1832,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1853,7 +1853,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1864,7 +1864,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1885,7 +1885,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -1896,7 +1896,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1917,7 +1917,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -1928,7 +1928,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -1949,7 +1949,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -1960,7 +1960,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - 
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -1982,7 +1982,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -1993,7 +1993,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2015,7 +2015,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2026,7 +2026,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2048,7 +2048,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2059,7 +2059,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2081,7 +2081,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2092,7 +2092,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2115,7 +2115,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2126,7 +2126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2149,7 +2149,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32*, , 
i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2160,7 +2160,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2183,7 +2183,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2194,7 +2194,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -2217,7 +2217,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2228,7 +2228,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2252,7 +2252,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2263,7 +2263,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2287,7 +2287,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2298,7 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2322,7 +2322,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2333,7 
+2333,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -2357,7 +2357,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2368,7 +2368,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2393,7 +2393,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2404,7 +2404,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2429,7 +2429,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2440,7 +2440,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2465,7 +2465,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2476,7 +2476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -2501,7 +2501,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { @@ -2512,7 +2512,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(i32* %base, 
%index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2538,7 +2538,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { @@ -2549,7 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2575,7 +2575,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { @@ -2586,7 +2586,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2612,7 +2612,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { @@ -2623,7 +2623,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -2649,7 +2649,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2660,7 +2660,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2679,7 +2679,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2690,7 +2690,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2709,7 +2709,7 @@ ret %1 
} -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2720,7 +2720,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2739,7 +2739,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -2750,7 +2750,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -2769,7 +2769,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2780,7 +2780,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2801,7 +2801,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2812,7 +2812,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2833,7 +2833,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2844,7 +2844,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2864,7 +2864,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -2875,7 +2875,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv8i16.nxv8i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -2895,7 +2895,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { @@ -2906,7 +2906,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2928,7 +2928,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { @@ -2939,7 +2939,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2961,7 +2961,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { @@ -2972,7 +2972,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -2993,7 +2993,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { @@ -3004,7 +3004,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3026,7 +3026,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3037,7 +3037,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3056,7 +3056,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i8(i8* %base, 
%index, i64 %vl) { @@ -3067,7 +3067,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3086,7 +3086,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3097,7 +3097,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3116,7 +3116,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3127,7 +3127,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -3146,7 +3146,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3157,7 +3157,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3177,7 +3177,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3188,7 +3188,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3209,7 +3209,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3220,7 +3220,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3240,7 +3240,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(,,, i8*, , i64) declare {,,} 
@llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3251,7 +3251,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -3272,7 +3272,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3283,7 +3283,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3305,7 +3305,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3316,7 +3316,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3338,7 +3338,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3349,7 +3349,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3370,7 +3370,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3381,7 +3381,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -3403,7 +3403,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3414,7 +3414,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = 
extractvalue {,,,,} %0, 1 ret %1 } @@ -3437,7 +3437,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3448,7 +3448,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3471,7 +3471,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3482,7 +3482,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3504,7 +3504,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3515,7 +3515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -3538,7 +3538,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3549,7 +3549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3573,7 +3573,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3584,7 +3584,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3608,7 +3608,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i64(i8* 
%base, %index, i64 %vl) { @@ -3619,7 +3619,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3643,7 +3643,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3654,7 +3654,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -3678,7 +3678,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3689,7 +3689,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3714,7 +3714,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3725,7 +3725,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3750,7 +3750,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3761,7 +3761,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3786,7 +3786,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3797,7 +3797,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.nxv4i8.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -3822,7 +3822,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { @@ -3833,7 +3833,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3859,7 +3859,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { @@ -3870,7 +3870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3896,7 +3896,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { @@ -3907,7 +3907,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3933,7 +3933,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { @@ -3944,7 +3944,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -3970,7 +3970,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -3981,7 +3981,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4000,7 +4000,7 @@ ret %1 } 
-declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4011,7 +4011,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4030,7 +4030,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4041,7 +4041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4060,7 +4060,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4071,7 +4071,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4090,7 +4090,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4101,7 +4101,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4122,7 +4122,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4133,7 +4133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4154,7 +4154,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4165,7 +4165,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv1i16.nxv1i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4186,7 +4186,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4197,7 +4197,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -4218,7 +4218,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4229,7 +4229,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4251,7 +4251,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4262,7 +4262,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4284,7 +4284,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4295,7 +4295,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4317,7 +4317,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4328,7 +4328,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -4350,7 +4350,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64, i64) define 
@test_vluxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4361,7 +4361,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4384,7 +4384,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4395,7 +4395,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4418,7 +4418,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4429,7 +4429,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4452,7 +4452,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4463,7 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -4486,7 +4486,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4497,7 +4497,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4521,7 +4521,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4532,7 +4532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4556,7 +4556,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4567,7 +4567,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4591,7 +4591,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4602,7 +4602,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -4626,7 +4626,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4637,7 +4637,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4662,7 +4662,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4673,7 +4673,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4698,7 +4698,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4709,7 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4734,7 +4734,7 
@@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4745,7 +4745,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -4770,7 +4770,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { @@ -4781,7 +4781,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4807,7 +4807,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { @@ -4818,7 +4818,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4844,7 +4844,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { @@ -4855,7 +4855,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4881,7 +4881,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { @@ -4892,7 +4892,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -4918,7 +4918,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32*, , i64) 
+declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -4929,7 +4929,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4948,7 +4948,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -4959,7 +4959,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -4978,7 +4978,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -4989,7 +4989,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5008,7 +5008,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5019,7 +5019,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5038,7 +5038,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5070,7 +5070,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5081,7 +5081,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = 
extractvalue {,,} %0, 1 ret %1 } @@ -5102,7 +5102,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5113,7 +5113,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5134,7 +5134,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(,,, i32*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64, i64) define @test_vluxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5145,7 +5145,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64( undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -5165,7 +5165,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5176,7 +5176,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5198,7 +5198,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5209,7 +5209,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5231,7 +5231,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5242,7 +5242,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5264,7 +5264,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(,,,, i32*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64, i64) define @test_vluxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5275,7 +5275,7 @@ ; CHECK-NEXT: vmv1r.v 
v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64( undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -5297,7 +5297,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5308,7 +5308,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5331,7 +5331,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5342,7 +5342,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5365,7 +5365,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5376,7 +5376,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5399,7 +5399,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(,,,,, i32*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64, i64) define @test_vluxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5410,7 +5410,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -5433,7 +5433,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5444,7 +5444,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret 
%1 } @@ -5468,7 +5468,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5479,7 +5479,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5503,7 +5503,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5514,7 +5514,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5538,7 +5538,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(,,,,,, i32*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64, i64) define @test_vluxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5549,7 +5549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -5573,7 +5573,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5584,7 +5584,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5609,7 +5609,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5620,7 +5620,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5645,7 +5645,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i64) declare 
{,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5656,7 +5656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5681,7 +5681,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(,,,,,,, i32*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64, i64) define @test_vluxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5692,7 +5692,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -5717,7 +5717,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { @@ -5728,7 +5728,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5754,7 +5754,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { @@ -5765,7 +5765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5791,7 +5791,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64, i64) define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { @@ -5802,7 +5802,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5828,7 +5828,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(,,,,,,,, i32*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64, i64) 
define @test_vluxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { @@ -5839,7 +5839,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -5865,7 +5865,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -5876,7 +5876,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5895,7 +5895,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -5906,7 +5906,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5925,7 +5925,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -5936,7 +5936,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5955,7 +5955,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -5966,7 +5966,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -5985,7 +5985,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -5996,7 +5996,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6016,7 +6016,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8*, , i64) +declare {,,} 
@llvm.riscv.vluxseg3.nxv8i8.nxv8i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6027,7 +6027,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6048,7 +6048,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6059,7 +6059,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6079,7 +6079,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6090,7 +6090,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -6110,7 +6110,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6121,7 +6121,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6143,7 +6143,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6154,7 +6154,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6176,7 +6176,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6187,7 +6187,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64( undef, undef, undef, undef, i8* %base, %index, i64 
%vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6208,7 +6208,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6219,7 +6219,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -6240,7 +6240,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6274,7 +6274,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6285,7 +6285,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6308,7 +6308,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6319,7 +6319,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6341,7 +6341,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6352,7 +6352,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -6374,7 +6374,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ 
-6385,7 +6385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6409,7 +6409,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6420,7 +6420,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6444,7 +6444,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6455,7 +6455,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6478,7 +6478,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6489,7 +6489,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -6513,7 +6513,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6524,7 +6524,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6549,7 +6549,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6560,7 +6560,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8( undef, undef, undef, undef, undef, 
undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6585,7 +6585,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6596,7 +6596,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6620,7 +6620,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6631,7 +6631,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -6656,7 +6656,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { @@ -6667,7 +6667,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6693,7 +6693,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { @@ -6704,7 +6704,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6730,7 +6730,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { @@ -6741,7 +6741,7 @@ ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6766,7 +6766,7 @@ ret %1 } -declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { @@ -6777,7 +6777,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -6803,7 +6803,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { @@ -6814,7 +6814,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6833,7 +6833,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { @@ -6844,7 +6844,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6863,7 +6863,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { @@ -6874,7 +6874,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6893,7 +6893,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { @@ -6904,7 +6904,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6923,7 +6923,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -6934,7 +6934,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail 
call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6953,7 +6953,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -6964,7 +6964,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -6983,7 +6983,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -6994,7 +6994,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7013,7 +7013,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7024,7 +7024,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7043,7 +7043,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7054,7 +7054,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7074,7 +7074,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7085,7 +7085,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7106,7 +7106,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7117,7 +7117,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7137,7 +7137,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7148,7 +7148,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -7169,7 +7169,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7180,7 +7180,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7202,7 +7202,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7213,7 +7213,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7235,7 +7235,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7246,7 +7246,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7267,7 +7267,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7278,7 +7278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -7300,7 +7300,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,} 
@llvm.riscv.vluxseg5.nxv4i16.nxv4i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7311,7 +7311,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7334,7 +7334,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7345,7 +7345,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7368,7 +7368,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7379,7 +7379,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7401,7 +7401,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7412,7 +7412,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -7435,7 +7435,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7446,7 +7446,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7470,7 +7470,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7481,7 +7481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: 
ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7505,7 +7505,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7516,7 +7516,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7540,7 +7540,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7551,7 +7551,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -7575,7 +7575,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7586,7 +7586,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7611,7 +7611,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7622,7 +7622,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7647,7 +7647,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7658,7 +7658,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64( undef, undef, 
undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7683,7 +7683,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7694,7 +7694,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -7719,7 +7719,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { @@ -7730,7 +7730,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7756,7 +7756,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { @@ -7767,7 +7767,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7793,7 +7793,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { @@ -7804,7 +7804,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7830,7 +7830,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { @@ -7841,7 +7841,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = 
extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -7867,7 +7867,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -7878,7 +7878,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7897,7 +7897,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -7908,7 +7908,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7927,7 +7927,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -7938,7 +7938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7957,7 +7957,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -7968,7 +7968,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -7987,7 +7987,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -7998,7 +7998,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8019,7 +8019,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8030,7 +8030,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv1i8.nxv1i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8051,7 +8051,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8062,7 +8062,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8083,7 +8083,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8094,7 +8094,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8115,7 +8115,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8126,7 +8126,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8148,7 +8148,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8159,7 +8159,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8181,7 +8181,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8192,7 +8192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8214,7 +8214,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8225,7 +8225,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -8247,7 +8247,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8258,7 +8258,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8281,7 +8281,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8292,7 +8292,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8315,7 +8315,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8326,7 +8326,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8349,7 +8349,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8360,7 +8360,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -8383,7 +8383,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8394,7 +8394,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8418,7 +8418,7 @@ ret %1 } -declare 
{,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8429,7 +8429,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8453,7 +8453,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8464,7 +8464,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8488,7 +8488,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8499,7 +8499,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -8523,7 +8523,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8534,7 +8534,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8559,7 +8559,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8570,7 +8570,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8595,7 +8595,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64, i64) define 
@test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8606,7 +8606,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8631,7 +8631,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8642,7 +8642,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -8667,7 +8667,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { @@ -8678,7 +8678,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8704,7 +8704,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { @@ -8715,7 +8715,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8741,7 +8741,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { @@ -8752,7 +8752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8778,7 +8778,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { @@ -8789,7 +8789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -8815,7 +8815,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -8826,7 +8826,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8845,7 +8845,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -8856,7 +8856,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8875,7 +8875,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -8886,7 +8886,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8905,7 +8905,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64, i64) define @test_vluxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -8916,7 +8916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -8935,7 +8935,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -8946,7 +8946,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8967,7 +8967,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i8(i8* %base, 
%index, i64 %vl) { @@ -8978,7 +8978,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -8999,7 +8999,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9010,7 +9010,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9031,7 +9031,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(i8*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(,,, i8*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64, i64) define @test_vluxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9042,7 +9042,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64( undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -9062,7 +9062,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9073,7 +9073,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9095,7 +9095,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9106,7 +9106,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9128,7 +9128,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9139,7 +9139,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9161,7 +9161,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,} 
@llvm.riscv.vluxseg4.nxv2i8.nxv2i64(,,,, i8*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64, i64) define @test_vluxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9172,7 +9172,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64( undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -9194,7 +9194,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9205,7 +9205,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9228,7 +9228,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9239,7 +9239,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9262,7 +9262,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9273,7 +9273,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9296,7 +9296,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(,,,,, i8*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64, i64) define @test_vluxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9307,7 +9307,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -9330,7 +9330,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9341,7 +9341,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, %index, 
i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9365,7 +9365,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9376,7 +9376,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9400,7 +9400,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9411,7 +9411,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9435,7 +9435,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(,,,,,, i8*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64, i64) define @test_vluxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9446,7 +9446,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -9470,7 +9470,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9481,7 +9481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9506,7 +9506,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9517,7 +9517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9542,7 +9542,7 @@ ret %1 } -declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9553,7 +9553,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9578,7 +9578,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(,,,,,,, i8*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64, i64) define @test_vluxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9589,7 +9589,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -9614,7 +9614,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { @@ -9625,7 +9625,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9651,7 +9651,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { @@ -9662,7 +9662,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9688,7 +9688,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(,,,,,,,, i8*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9725,7 +9725,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(,,,,,,,, i8*, , i64) declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64, i64) define @test_vluxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { @@ -9736,7 +9736,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -9762,7 +9762,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { @@ -9773,7 +9773,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9792,7 +9792,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { @@ -9803,7 +9803,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9822,7 +9822,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { @@ -9833,7 +9833,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9852,7 +9852,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(,, i32*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64, i64) define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { @@ -9863,7 +9863,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32( undef, undef, i32* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9882,7 +9882,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64, i64) define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { @@ -9893,7 +9893,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9912,7 
+9912,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(,, i8*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64, i64) define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { @@ -9923,7 +9923,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8( undef, undef, i8* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9942,7 +9942,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -9953,7 +9953,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -9972,7 +9972,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -9983,7 +9983,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10002,7 +10002,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10013,7 +10013,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10032,7 +10032,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(i16*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(,, i16*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(,, i16*, , , i64, i64) define @test_vluxseg2_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10043,7 +10043,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64( undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10062,7 +10062,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10073,7 +10073,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv2i16.nxv2i32( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10094,7 +10094,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10105,7 +10105,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10126,7 +10126,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10137,7 +10137,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10158,7 +10158,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(i16*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(,,, i16*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64, i64) define @test_vluxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10169,7 +10169,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64( undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -10189,7 +10189,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10200,7 +10200,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10222,7 +10222,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10233,7 +10233,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10255,7 +10255,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64, i64) define 
@test_vluxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10266,7 +10266,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10288,7 +10288,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(,,,, i16*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64, i64) define @test_vluxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10299,7 +10299,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64( undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -10321,7 +10321,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10332,7 +10332,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10355,7 +10355,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10366,7 +10366,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10389,7 +10389,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10400,7 +10400,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10423,7 +10423,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(,,,,, i16*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64, i64) define @test_vluxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10434,7 +10434,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64( 
undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -10457,7 +10457,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10468,7 +10468,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10492,7 +10492,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10503,7 +10503,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10527,7 +10527,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10538,7 +10538,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10562,7 +10562,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(,,,,,, i16*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64, i64) define @test_vluxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10573,7 +10573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -10597,7 +10597,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10608,7 +10608,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10633,7 +10633,7 @@ ret %1 } -declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10644,7 +10644,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10669,7 +10669,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10680,7 +10680,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10705,7 +10705,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(,,,,,,, i16*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64, i64) define @test_vluxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10716,7 +10716,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -10741,7 +10741,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { @@ -10752,7 +10752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10778,7 +10778,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { @@ -10789,7 +10789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10815,7 +10815,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv2i16.nxv2i16(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { @@ -10826,7 +10826,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10852,7 +10852,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(,,,,,,,, i16*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64, i64) define @test_vluxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { @@ -10863,7 +10863,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, i16* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -10889,7 +10889,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -10900,7 +10900,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10919,7 +10919,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -10930,7 +10930,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10949,7 +10949,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -10960,7 +10960,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -10979,7 +10979,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(,, i64*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64, i64) define @test_vluxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -10990,7 +10990,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64( undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11009,7 +11009,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -11020,7 +11020,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11041,7 +11041,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -11052,7 +11052,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11073,7 +11073,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -11084,7 +11084,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11105,7 +11105,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(,,, i64*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64, i64) define @test_vluxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -11116,7 +11116,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64( undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11137,7 +11137,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { @@ -11148,7 +11148,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11170,7 +11170,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(,,,, i64*, , i64) declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { @@ -11181,7 +11181,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11203,7 +11203,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { @@ -11214,7 +11214,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11236,7 +11236,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(,,,, i64*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64, i64) define @test_vluxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { @@ -11247,7 +11247,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64( undef, undef, undef, undef, i64* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11269,7 +11269,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { @@ -11280,7 +11280,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11299,7 +11299,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { @@ -11310,7 +11310,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11329,7 +11329,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { @@ -11340,7 +11340,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32( 
undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11359,7 +11359,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { @@ -11370,7 +11370,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11389,7 +11389,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { @@ -11400,7 +11400,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11419,7 +11419,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { @@ -11430,7 +11430,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11449,7 +11449,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { @@ -11460,7 +11460,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11479,7 +11479,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11490,7 +11490,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11509,7 +11509,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ 
-11520,7 +11520,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11539,7 +11539,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11550,7 +11550,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11569,7 +11569,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64, i64) define @test_vluxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11580,7 +11580,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -11599,7 +11599,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11610,7 +11610,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11631,7 +11631,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11642,7 +11642,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11663,7 +11663,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11674,7 +11674,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11695,7 +11695,7 @@ ret %1 } -declare {,,} 
@llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64, i64) define @test_vluxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11706,7 +11706,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -11727,7 +11727,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -11738,7 +11738,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11760,7 +11760,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11771,7 +11771,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11793,7 +11793,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11804,7 +11804,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11826,7 +11826,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11837,7 +11837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -11859,7 +11859,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i64(double* %base, %index, 
i64 %vl) { @@ -11870,7 +11870,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11893,7 +11893,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -11904,7 +11904,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11927,7 +11927,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -11938,7 +11938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11961,7 +11961,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(,,,,, double*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64, i64) define @test_vluxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -11972,7 +11972,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -11995,7 +11995,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12006,7 +12006,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12030,7 +12030,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -12041,7 +12041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* 
%base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12065,7 +12065,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -12076,7 +12076,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12100,7 +12100,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(,,,,,, double*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64, i64) define @test_vluxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12111,7 +12111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12135,7 +12135,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12146,7 +12146,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12171,7 +12171,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -12182,7 +12182,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12207,7 +12207,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -12218,7 +12218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.nxv1f64.nxv1i16( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12243,7 +12243,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(,,,,,,, double*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64, i64) define @test_vluxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12254,7 +12254,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8( undef, undef, undef, undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -12279,7 +12279,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { @@ -12290,7 +12290,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12316,7 +12316,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { @@ -12327,7 +12327,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12353,7 +12353,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { @@ -12364,7 +12364,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12390,7 +12390,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(,,,,,,,, double*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64, i64) define @test_vluxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { @@ -12401,7 +12401,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + 
%0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -12427,7 +12427,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12438,7 +12438,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12457,7 +12457,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12468,7 +12468,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12487,7 +12487,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12498,7 +12498,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12517,7 +12517,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12528,7 +12528,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -12547,7 +12547,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12558,7 +12558,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12579,7 +12579,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(,,, 
float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12590,7 +12590,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12611,7 +12611,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12622,7 +12622,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12643,7 +12643,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64, i64) define @test_vluxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12654,7 +12654,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -12674,7 +12674,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12685,7 +12685,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12707,7 +12707,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12718,7 +12718,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12740,7 +12740,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12751,7 +12751,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16( 
undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12773,7 +12773,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12784,7 +12784,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -12806,7 +12806,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12817,7 +12817,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12840,7 +12840,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12851,7 +12851,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12874,7 +12874,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -12885,7 +12885,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12908,7 +12908,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -12919,7 +12919,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -12942,7 +12942,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,} 
@llvm.riscv.vluxseg6.nxv2f32.nxv2i32(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -12953,7 +12953,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -12977,7 +12977,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -12988,7 +12988,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13012,7 +13012,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13023,7 +13023,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13047,7 +13047,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13058,7 +13058,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13082,7 +13082,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -13093,7 +13093,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13118,7 +13118,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(,,,,,,, float*, , i64) declare {,,,,,,} 
@llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -13129,7 +13129,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13154,7 +13154,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13165,7 +13165,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13190,7 +13190,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13201,7 +13201,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -13226,7 +13226,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { @@ -13237,7 +13237,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13263,7 +13263,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { @@ -13274,7 +13274,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13300,7 +13300,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(,,,,,,,, float*, , i64) declare {,,,,,,,} 
@llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { @@ -13311,7 +13311,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13337,7 +13337,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { @@ -13348,7 +13348,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -13374,7 +13374,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13385,7 +13385,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13404,7 +13404,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13415,7 +13415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13434,7 +13434,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13445,7 +13445,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13464,7 +13464,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13475,7 +13475,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, 
%index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -13494,7 +13494,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13505,7 +13505,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13526,7 +13526,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13537,7 +13537,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13558,7 +13558,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13569,7 +13569,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13590,7 +13590,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13601,7 +13601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -13622,7 +13622,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13633,7 +13633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13655,7 +13655,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(,,,, half*, , i64) declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13666,7 +13666,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13688,7 +13688,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13699,7 +13699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13721,7 +13721,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13732,7 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -13754,7 +13754,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13765,7 +13765,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13788,7 +13788,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13799,7 +13799,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13822,7 +13822,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13833,7 +13833,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13856,7 +13856,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -13867,7 +13867,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -13890,7 +13890,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -13901,7 +13901,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13925,7 +13925,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -13936,7 +13936,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13960,7 +13960,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -13971,7 +13971,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -13995,7 +13995,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14006,7 +14006,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, half* %base, %index, 
i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14030,7 +14030,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -14041,7 +14041,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14066,7 +14066,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -14077,7 +14077,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14102,7 +14102,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -14113,7 +14113,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14138,7 +14138,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14149,7 +14149,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -14174,7 +14174,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { @@ -14185,7 +14185,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14211,7 +14211,7 
@@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { @@ -14222,7 +14222,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14248,7 +14248,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { @@ -14259,7 +14259,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14285,7 +14285,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { @@ -14296,7 +14296,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -14322,7 +14322,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14333,7 +14333,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14352,7 +14352,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14363,7 +14363,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14382,7 +14382,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(,, float*, , i64) declare {,} 
@llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14393,7 +14393,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14412,7 +14412,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14423,7 +14423,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -14442,7 +14442,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14453,7 +14453,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14474,7 +14474,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14485,7 +14485,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14506,7 +14506,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14517,7 +14517,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16( undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14538,7 +14538,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(,,, float*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64, i64) define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14549,7 +14549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8( undef, undef, undef, 
float* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -14570,7 +14570,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14581,7 +14581,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14603,7 +14603,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14614,7 +14614,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14636,7 +14636,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14647,7 +14647,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14669,7 +14669,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(,,,, float*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64, i64) define @test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14680,7 +14680,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8( undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -14702,7 +14702,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14713,7 +14713,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14736,7 +14736,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(,,,,, float*, , i64) declare {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14747,7 +14747,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14770,7 +14770,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14781,7 +14781,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14804,7 +14804,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(,,,,, float*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64, i64) define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14815,7 +14815,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -14838,7 +14838,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14849,7 +14849,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14873,7 +14873,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -14884,7 +14884,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14908,7 +14908,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -14919,7 +14919,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14943,7 +14943,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(,,,,,, float*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64, i64) define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -14954,7 +14954,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -14978,7 +14978,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -14989,7 +14989,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15014,7 +15014,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -15025,7 +15025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15050,7 +15050,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -15061,7 +15061,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15086,7 +15086,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64, i64) define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -15097,7 +15097,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8( undef, undef, undef, undef, undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -15122,7 +15122,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { @@ -15133,7 +15133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15159,7 +15159,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { @@ -15170,7 +15170,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15196,7 +15196,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { @@ -15207,7 +15207,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15233,7 +15233,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64, i64) define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { @@ -15244,7 +15244,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8( undef, undef , undef , undef, undef , undef, undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -15270,7 +15270,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15281,7 +15281,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15300,7 +15300,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15311,7 +15311,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15330,7 +15330,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15341,7 +15341,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15360,7 +15360,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15371,7 +15371,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15390,7 +15390,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15401,7 +15401,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15422,7 +15422,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15433,7 +15433,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15454,7 +15454,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(,,, half*, , 
, i64, i64) define @test_vluxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15465,7 +15465,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15485,7 +15485,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15496,7 +15496,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15516,7 +15516,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { @@ -15527,7 +15527,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15549,7 +15549,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { @@ -15560,7 +15560,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15582,7 +15582,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { @@ -15593,7 +15593,7 @@ ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15614,7 +15614,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { @@ -15625,7 +15625,7 @@ ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32( undef, undef, undef, undef, 
half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -15647,7 +15647,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { @@ -15658,7 +15658,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15677,7 +15677,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { @@ -15688,7 +15688,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15707,7 +15707,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { @@ -15718,7 +15718,7 @@ ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15737,7 +15737,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(,, float*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64, i64) define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { @@ -15748,7 +15748,7 @@ ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32( undef, undef, float* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15767,7 +15767,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -15778,7 +15778,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15797,7 +15797,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -15808,7 +15808,7 @@ ; CHECK-NEXT: 
vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15827,7 +15827,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -15838,7 +15838,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15857,7 +15857,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(,, double*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64, i64) define @test_vluxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -15868,7 +15868,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64( undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -15887,7 +15887,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -15898,7 +15898,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15919,7 +15919,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -15930,7 +15930,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15951,7 +15951,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -15962,7 +15962,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -15983,7 +15983,7 @@ ret %1 } -declare {,,} 
@llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(,,, double*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64, i64) define @test_vluxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -15994,7 +15994,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64( undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16015,7 +16015,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { @@ -16026,7 +16026,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16048,7 +16048,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { @@ -16059,7 +16059,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16081,7 +16081,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { @@ -16092,7 +16092,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16114,7 +16114,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(,,,, double*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64, i64) define @test_vluxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { @@ -16125,7 +16125,7 @@ ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64( undef, undef, undef, undef, double* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16147,7 +16147,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ 
-16158,7 +16158,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16177,7 +16177,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16188,7 +16188,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16207,7 +16207,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16218,7 +16218,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16237,7 +16237,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16248,7 +16248,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -16267,7 +16267,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16278,7 +16278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16298,7 +16298,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16309,7 +16309,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16330,7 +16330,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half*, , i64) +declare {,,} 
@llvm.riscv.vluxseg3.nxv4f16.nxv4i64(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16341,7 +16341,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16361,7 +16361,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16372,7 +16372,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -16393,7 +16393,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16404,7 +16404,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16426,7 +16426,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16437,7 +16437,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16459,7 +16459,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16470,7 +16470,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16491,7 +16491,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16502,7 +16502,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -16524,7 +16524,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16535,7 +16535,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16558,7 +16558,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16569,7 +16569,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16592,7 +16592,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16603,7 +16603,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16625,7 +16625,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16636,7 +16636,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -16659,7 +16659,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16670,7 +16670,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } 
@@ -16694,7 +16694,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16705,7 +16705,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16729,7 +16729,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16740,7 +16740,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16764,7 +16764,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16775,7 +16775,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -16799,7 +16799,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16810,7 +16810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16835,7 +16835,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16846,7 +16846,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16871,7 +16871,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,} 
@llvm.riscv.vluxseg7.nxv4f16.nxv4i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -16882,7 +16882,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16907,7 +16907,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -16918,7 +16918,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -16943,7 +16943,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { @@ -16954,7 +16954,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -16980,7 +16980,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { @@ -16991,7 +16991,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17017,7 +17017,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { @@ -17028,7 +17028,7 @@ ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17054,7 +17054,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { @@ -17065,7 +17065,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17091,7 +17091,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17102,7 +17102,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17121,7 +17121,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17132,7 +17132,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17151,7 +17151,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17162,7 +17162,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17181,7 +17181,7 @@ ret %1 } -declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(,, half*, , i64) declare {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64, i64) define @test_vluxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17192,7 +17192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64( undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,} %0, 1 ret %1 } @@ -17211,7 +17211,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17222,7 +17222,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = 
tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17243,7 +17243,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17254,7 +17254,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17275,7 +17275,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17286,7 +17286,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17307,7 +17307,7 @@ ret %1 } -declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(half*, , i64) +declare {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(,,, half*, , i64) declare {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64, i64) define @test_vluxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17318,7 +17318,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64( undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } @@ -17338,7 +17338,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17349,7 +17349,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17371,7 +17371,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17382,7 +17382,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17404,7 +17404,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(,,,, half*, , i64) declare {,,,} 
@llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17415,7 +17415,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17437,7 +17437,7 @@ ret %1 } -declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(half*, , i64) +declare {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(,,,, half*, , i64) declare {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64, i64) define @test_vluxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17448,7 +17448,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64( undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } @@ -17470,7 +17470,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17481,7 +17481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17504,7 +17504,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17515,7 +17515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17538,7 +17538,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17549,7 +17549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17572,7 +17572,7 @@ ret %1 } -declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(,,,,, half*, , i64) declare {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64, i64) define @test_vluxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17583,7 +17583,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } @@ -17606,7 +17606,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17617,7 +17617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17641,7 +17641,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17652,7 +17652,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17676,7 +17676,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17687,7 +17687,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17711,7 +17711,7 @@ ret %1 } -declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(,,,,,, half*, , i64) declare {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64, i64) define @test_vluxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17722,7 +17722,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } @@ -17746,7 +17746,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17757,7 +17757,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32( undef, undef, undef, undef, undef, 
undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17782,7 +17782,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17793,7 +17793,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17818,7 +17818,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { @@ -17829,7 +17829,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17854,7 +17854,7 @@ ret %1 } -declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(,,,,,,, half*, , i64) declare {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64, i64) define @test_vluxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { @@ -17865,7 +17865,7 @@ ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64( undef, undef, undef, undef, undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } @@ -17890,7 +17890,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { @@ -17901,7 +17901,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } @@ -17927,7 +17927,7 @@ ret %1 } -declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i64) declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64, i64) define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { @@ -17938,7 +17938,7 @@ ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl) %1 = extractvalue 
%1 = extractvalue {,,,,,,,} %0, 1
ret %1
}
@@ -17964,7 +17964,7 @@
ret %1
}

-declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half*, , i64)
+declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i64)
declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64, i64)

define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) {
@@ -17975,7 +17975,7 @@
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl)
%1 = extractvalue {,,,,,,,} %0, 1
ret %1
}
@@ -18001,7 +18001,7 @@
ret %1
}

-declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(half*, , i64)
+declare {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(,,,,,,,, half*, , i64)
declare {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64, i64)

define @test_vluxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) {
@@ -18012,7 +18012,7 @@
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64( undef, undef , undef , undef, undef , undef, undef, undef, half* %base, %index, i64 %vl)
%1 = extractvalue {,,,,,,,} %0, 1
ret %1
}
@@ -18038,7 +18038,7 @@
ret %1
}

-declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(,, float*, , i64)
declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64, i64)

define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) {
@@ -18049,7 +18049,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32( undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
@@ -18068,7 +18068,7 @@
ret %1
}

-declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(,, float*, , i64)
declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64, i64)

define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) {
@@ -18079,7 +18079,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8( undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
@@ -18098,7 +18098,7 @@
ret %1
}

-declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(,, float*, , i64)
declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64, i64)

define @test_vluxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) {
@@ -18109,7 +18109,7 @@
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64( undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
@@ -18128,7 +18128,7 @@
ret %1
}

-declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(,, float*, , i64)
declare {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64, i64)
define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) {
@@ -18139,7 +18139,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16( undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,} %0, 1
ret %1
}
@@ -18158,7 +18158,7 @@
ret %1
}

-declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float*, , i64)
+declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(,,, float*, , i64)
declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64, i64)

define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) {
@@ -18169,7 +18169,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32( undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
@@ -18190,7 +18190,7 @@
ret %1
}

-declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float*, , i64)
+declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(,,, float*, , i64)
declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64, i64)

define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) {
@@ -18201,7 +18201,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8( undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
@@ -18222,7 +18222,7 @@
ret %1
}

-declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float*, , i64)
+declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(,,, float*, , i64)
declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64, i64)

define @test_vluxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) {
@@ -18233,7 +18233,7 @@
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64( undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
@@ -18253,7 +18253,7 @@
ret %1
}

-declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float*, , i64)
+declare {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(,,, float*, , i64)
declare {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64, i64)

define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) {
@@ -18264,7 +18264,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
+ %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16( undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,} %0, 1
ret %1
}
@@ -18285,7 +18285,7 @@
ret %1
}

-declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float*, , i64)
+declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(,,,, float*, , i64)
declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64, i64)

define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) {
@@ -18296,7 +18296,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32( undef, undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
@@ -18318,7 +18318,7 @@
ret %1
}

-declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float*, , i64)
+declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(,,,, float*, , i64)
declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64, i64)

define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) {
@@ -18329,7 +18329,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8( undef, undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
@@ -18351,7 +18351,7 @@
ret %1
}

-declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float*, , i64)
+declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(,,,, float*, , i64)
declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64, i64)

define @test_vluxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) {
@@ -18362,7 +18362,7 @@
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64( undef, undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
@@ -18384,7 +18384,7 @@
ret %1
}

-declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float*, , i64)
+declare {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(,,,, float*, , i64)
declare {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64, i64)

define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) {
@@ -18395,7 +18395,7 @@
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16( undef, undef, undef, undef, float* %base, %index, i64 %vl)
%1 = extractvalue {,,,} %0, 1
ret %1
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
@@ -5,7 +5,7 @@
; Make sure we don't select a 0 vl to X0 in the custom isel handlers we use
; for these intrinsics.
-declare {,} @llvm.riscv.vlseg2.nxv16i16(i16* , i64)
+declare {,} @llvm.riscv.vlseg2.nxv16i16(,, i16*, i64)
declare {,} @llvm.riscv.vlseg2.mask.nxv16i16(,, i16*, , i64, i64)

define @test_vlseg2_mask_nxv16i16(i16* %base, %mask) {
@@ -18,14 +18,14 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0)
+ %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16( undef, undef, i16* %base, i64 0)
%1 = extractvalue {,} %0, 0
%2 = tail call {,} @llvm.riscv.vlseg2.mask.nxv16i16( %1, %1, i16* %base, %mask, i64 0, i64 1)
%3 = extractvalue {,} %2, 1
ret %3
}

-declare {,} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64)
+declare {,} @llvm.riscv.vlsseg2.nxv16i16(,, i16*, i64, i64)
declare {,} @llvm.riscv.vlsseg2.mask.nxv16i16(,, i16*, i64, , i64, i64)

define @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, %mask) {
@@ -38,13 +38,13 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0)
+ %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16( undef, undef, i16* %base, i64 %offset, i64 0)
%1 = extractvalue {,} %0, 0
%2 = tail call {,} @llvm.riscv.vlsseg2.mask.nxv16i16( %1, %1, i16* %base, i64 %offset, %mask, i64 0, i64 1)
%3 = extractvalue {,} %2, 1
ret %3
}

-declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16*, , i64)
+declare {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(,, i16*, , i64)
declare {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64)

define @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, %index, %mask) {
@@ -57,14 +57,14 @@
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i64 0)
%1 = extractvalue {,} %0, 0
%2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0, i64 1)
%3 = extractvalue {,} %2, 1
ret %3
}

-declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16*, , i64)
+declare {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(,, i16*, , i64)
declare {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64, i64)

define @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, %index, %mask) {
@@ -77,14 +77,14 @@
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0)
+ %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16( undef, undef, i16* %base, %index, i64 0)
%1 = extractvalue {,} %0, 0
%2 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 0, i64 1)
%3 = extractvalue {,} %2, 1
ret %3
}

-declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64)
+declare {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(,, i16* , i64)
declare {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(,, i16*, , i64, i64)

define @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) {
@@ -97,7 +97,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
- %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 0)
+ %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16( undef, undef, i16* %base, i64 0)
%1 = extractvalue {,, i64} %0, 1
%2 = extractvalue {,, i64} %0, 2
store i64 %2, i64* %outvl